comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
We are seeing more and more of this "test timing out after latest test proxy update". What are the cause of this time out? Is it test proxy problem, or our TestBase problem? Our current timeout seems to be 1min, which should be enough for playback? https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/resourcemanager/azure-resourcemanager-test/src/main/java/com/azure/resourcemanager/test/ResourceManagerTestProxyTestBase.java#L124
public void canSetMSIOnNewOrExistingVMWithoutRoleAssignment() throws Exception { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withSystemAssignedManagedServiceIdentity() .create(); Assertions.assertNotNull(virtualMachine); Assertions.assertNotNull(virtualMachine.innerModel()); Assertions.assertTrue(virtualMachine.isManagedServiceIdentityEnabled()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityTenantId()); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(virtualMachine.resourceGroupName()); PagedIterable<RoleAssignment> rgRoleAssignments1 = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments1); boolean found = false; for (RoleAssignment roleAssignment : rgRoleAssignments1) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertFalse(found, "Resource group should not have a role assignment with virtual machine MSI principal"); virtualMachine = virtualMachine.update().withSystemAssignedManagedServiceIdentity().apply(); Assertions.assertNotNull(virtualMachine); Assertions.assertNotNull(virtualMachine.innerModel()); Assertions.assertTrue(virtualMachine.isManagedServiceIdentityEnabled()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId()); 
Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityTenantId()); rgRoleAssignments1 = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments1); found = false; for (RoleAssignment roleAssignment : rgRoleAssignments1) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertFalse(found, "Resource group should not have a role assignment with virtual machine MSI principal"); }
public void canSetMSIOnNewOrExistingVMWithoutRoleAssignment() throws Exception { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withSystemAssignedManagedServiceIdentity() .create(); Assertions.assertNotNull(virtualMachine); Assertions.assertNotNull(virtualMachine.innerModel()); Assertions.assertTrue(virtualMachine.isManagedServiceIdentityEnabled()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityTenantId()); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(virtualMachine.resourceGroupName()); PagedIterable<RoleAssignment> rgRoleAssignments1 = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments1); boolean found = false; for (RoleAssignment roleAssignment : rgRoleAssignments1) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertFalse(found, "Resource group should not have a role assignment with virtual machine MSI principal"); virtualMachine = virtualMachine.update().withSystemAssignedManagedServiceIdentity().apply(); Assertions.assertNotNull(virtualMachine); Assertions.assertNotNull(virtualMachine.innerModel()); Assertions.assertTrue(virtualMachine.isManagedServiceIdentityEnabled()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId()); 
Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityTenantId()); rgRoleAssignments1 = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments1); found = false; for (RoleAssignment roleAssignment : rgRoleAssignments1) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertFalse(found, "Resource group should not have a role assignment with virtual machine MSI principal"); }
class VirtualMachineManagedServiceIdentityOperationsTests extends ComputeManagementTest { private String rgName = ""; private final Region region = Region.US_EAST; private final String vmName = "javavm"; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { resourceManager.resourceGroups().beginDeleteByName(rgName); } @Test @DoNotRecord(skipInPlayback = true) @Test public void canSetMSIOnNewVMWithRoleAssignedToCurrentResourceGroup() throws Exception { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR) .create(); Assertions.assertNotNull(virtualMachine); Assertions.assertNotNull(virtualMachine.innerModel()); Assertions.assertTrue(virtualMachine.isManagedServiceIdentityEnabled()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityTenantId()); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(virtualMachine.resourceGroupName()); PagedIterable<RoleAssignment> rgRoleAssignments = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); boolean found = false; for (RoleAssignment rgRoleAssignment : rgRoleAssignments) { if (rgRoleAssignment.principalId() != null && 
rgRoleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions.assertTrue(found, "Resource group should have a role assignment with virtual machine MSI principal"); } @Test public void canSetMSIOnNewVMWithMultipleRoleAssignments() throws Exception { String storageAccountName = generateRandomResourceName("javacsrg", 15); StorageAccount storageAccount = storageManager .storageAccounts() .define(storageAccountName) .withRegion(Region.US_EAST2) .withNewResourceGroup(rgName) .create(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(storageAccount.resourceGroupName()); VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessTo(resourceGroup.id(), BuiltInRole.CONTRIBUTOR) .withSystemAssignedIdentityBasedAccessTo(storageAccount.id(), BuiltInRole.CONTRIBUTOR) .create(); PagedIterable<RoleAssignment> rgRoleAssignments = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments); boolean found = false; for (RoleAssignment roleAssignment : rgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions.assertTrue(found, "Resource group should have a role assignment with virtual machine MSI principal"); PagedIterable<RoleAssignment> 
stgRoleAssignments = authorizationManager.roleAssignments().listByScope(storageAccount.id()); Assertions.assertNotNull(stgRoleAssignments); found = false; for (RoleAssignment roleAssignment : stgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertTrue(found, "Storage account should have a role assignment with virtual machine MSI principal"); } @Test @DoNotRecord(skipInPlayback = true) public void canSetMSIOnExistingVMWithRoleAssignments() throws Exception { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withSystemAssignedManagedServiceIdentity() .create(); Assertions.assertNotNull(virtualMachine); Assertions.assertNotNull(virtualMachine.innerModel()); Assertions.assertTrue(virtualMachine.isManagedServiceIdentityEnabled()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityTenantId()); Assertions.assertNotNull(virtualMachine.managedServiceIdentityType()); Assertions.assertTrue(virtualMachine.managedServiceIdentityType().equals(ResourceIdentityType.SYSTEM_ASSIGNED)); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(virtualMachine.resourceGroupName()); PagedIterable<RoleAssignment> rgRoleAssignments1 = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments1); boolean 
found = false; for (RoleAssignment roleAssignment : rgRoleAssignments1) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertFalse(found, "Resource group should not have a role assignment with virtual machine MSI principal"); virtualMachine .update() .withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR) .apply(); PagedIterable<RoleAssignment> roleAssignments2 = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(roleAssignments2); for (RoleAssignment roleAssignment : roleAssignments2) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions.assertTrue(found, "Resource group should have a role assignment with virtual machine MSI principal"); } private static Integer objectToInteger(Object obj) { Integer result = null; if (obj != null) { if (obj instanceof Integer) { result = (Integer) obj; } else { result = Integer.valueOf((String) obj); } } return result; } }
class VirtualMachineManagedServiceIdentityOperationsTests extends ComputeManagementTest { private String rgName = ""; private final Region region = Region.US_EAST; private final String vmName = "javavm"; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { resourceManager.resourceGroups().beginDeleteByName(rgName); } @Test @DoNotRecord(skipInPlayback = true) @Test public void canSetMSIOnNewVMWithRoleAssignedToCurrentResourceGroup() throws Exception { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR) .create(); Assertions.assertNotNull(virtualMachine); Assertions.assertNotNull(virtualMachine.innerModel()); Assertions.assertTrue(virtualMachine.isManagedServiceIdentityEnabled()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityTenantId()); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(virtualMachine.resourceGroupName()); PagedIterable<RoleAssignment> rgRoleAssignments = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); boolean found = false; for (RoleAssignment rgRoleAssignment : rgRoleAssignments) { if (rgRoleAssignment.principalId() != null && 
rgRoleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions.assertTrue(found, "Resource group should have a role assignment with virtual machine MSI principal"); } @Test public void canSetMSIOnNewVMWithMultipleRoleAssignments() throws Exception { String storageAccountName = generateRandomResourceName("javacsrg", 15); StorageAccount storageAccount = storageManager .storageAccounts() .define(storageAccountName) .withRegion(Region.US_EAST2) .withNewResourceGroup(rgName) .create(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(storageAccount.resourceGroupName()); VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessTo(resourceGroup.id(), BuiltInRole.CONTRIBUTOR) .withSystemAssignedIdentityBasedAccessTo(storageAccount.id(), BuiltInRole.CONTRIBUTOR) .create(); PagedIterable<RoleAssignment> rgRoleAssignments = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments); boolean found = false; for (RoleAssignment roleAssignment : rgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions.assertTrue(found, "Resource group should have a role assignment with virtual machine MSI principal"); PagedIterable<RoleAssignment> 
stgRoleAssignments = authorizationManager.roleAssignments().listByScope(storageAccount.id()); Assertions.assertNotNull(stgRoleAssignments); found = false; for (RoleAssignment roleAssignment : stgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertTrue(found, "Storage account should have a role assignment with virtual machine MSI principal"); } @Test @DoNotRecord(skipInPlayback = true) public void canSetMSIOnExistingVMWithRoleAssignments() throws Exception { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withSystemAssignedManagedServiceIdentity() .create(); Assertions.assertNotNull(virtualMachine); Assertions.assertNotNull(virtualMachine.innerModel()); Assertions.assertTrue(virtualMachine.isManagedServiceIdentityEnabled()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityTenantId()); Assertions.assertNotNull(virtualMachine.managedServiceIdentityType()); Assertions.assertTrue(virtualMachine.managedServiceIdentityType().equals(ResourceIdentityType.SYSTEM_ASSIGNED)); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(virtualMachine.resourceGroupName()); PagedIterable<RoleAssignment> rgRoleAssignments1 = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments1); boolean 
found = false; for (RoleAssignment roleAssignment : rgRoleAssignments1) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertFalse(found, "Resource group should not have a role assignment with virtual machine MSI principal"); virtualMachine .update() .withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR) .apply(); PagedIterable<RoleAssignment> roleAssignments2 = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(roleAssignments2); for (RoleAssignment roleAssignment : roleAssignments2) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions.assertTrue(found, "Resource group should have a role assignment with virtual machine MSI principal"); } private static Integer objectToInteger(Object obj) { Integer result = null; if (obj != null) { if (obj instanceof Integer) { result = (Integer) obj; } else { result = Integer.valueOf((String) obj); } } return result; } }
@weidongxu-microsoft At present, I have found that when deserializing large lists and complex structure lists during CI testing, timeout exceptions will occur in almost all test environments. However, these test cases can successfully executed for PLAYBACK at location environment. So I think this is not just a problem with the test proxy or TestBase.
public void canSetMSIOnNewOrExistingVMWithoutRoleAssignment() throws Exception { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withSystemAssignedManagedServiceIdentity() .create(); Assertions.assertNotNull(virtualMachine); Assertions.assertNotNull(virtualMachine.innerModel()); Assertions.assertTrue(virtualMachine.isManagedServiceIdentityEnabled()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityTenantId()); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(virtualMachine.resourceGroupName()); PagedIterable<RoleAssignment> rgRoleAssignments1 = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments1); boolean found = false; for (RoleAssignment roleAssignment : rgRoleAssignments1) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertFalse(found, "Resource group should not have a role assignment with virtual machine MSI principal"); virtualMachine = virtualMachine.update().withSystemAssignedManagedServiceIdentity().apply(); Assertions.assertNotNull(virtualMachine); Assertions.assertNotNull(virtualMachine.innerModel()); Assertions.assertTrue(virtualMachine.isManagedServiceIdentityEnabled()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId()); 
Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityTenantId()); rgRoleAssignments1 = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments1); found = false; for (RoleAssignment roleAssignment : rgRoleAssignments1) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertFalse(found, "Resource group should not have a role assignment with virtual machine MSI principal"); }
public void canSetMSIOnNewOrExistingVMWithoutRoleAssignment() throws Exception { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withSystemAssignedManagedServiceIdentity() .create(); Assertions.assertNotNull(virtualMachine); Assertions.assertNotNull(virtualMachine.innerModel()); Assertions.assertTrue(virtualMachine.isManagedServiceIdentityEnabled()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityTenantId()); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(virtualMachine.resourceGroupName()); PagedIterable<RoleAssignment> rgRoleAssignments1 = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments1); boolean found = false; for (RoleAssignment roleAssignment : rgRoleAssignments1) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertFalse(found, "Resource group should not have a role assignment with virtual machine MSI principal"); virtualMachine = virtualMachine.update().withSystemAssignedManagedServiceIdentity().apply(); Assertions.assertNotNull(virtualMachine); Assertions.assertNotNull(virtualMachine.innerModel()); Assertions.assertTrue(virtualMachine.isManagedServiceIdentityEnabled()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId()); 
Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityTenantId()); rgRoleAssignments1 = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments1); found = false; for (RoleAssignment roleAssignment : rgRoleAssignments1) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertFalse(found, "Resource group should not have a role assignment with virtual machine MSI principal"); }
class VirtualMachineManagedServiceIdentityOperationsTests extends ComputeManagementTest { private String rgName = ""; private final Region region = Region.US_EAST; private final String vmName = "javavm"; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { resourceManager.resourceGroups().beginDeleteByName(rgName); } @Test @DoNotRecord(skipInPlayback = true) @Test public void canSetMSIOnNewVMWithRoleAssignedToCurrentResourceGroup() throws Exception { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR) .create(); Assertions.assertNotNull(virtualMachine); Assertions.assertNotNull(virtualMachine.innerModel()); Assertions.assertTrue(virtualMachine.isManagedServiceIdentityEnabled()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityTenantId()); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(virtualMachine.resourceGroupName()); PagedIterable<RoleAssignment> rgRoleAssignments = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); boolean found = false; for (RoleAssignment rgRoleAssignment : rgRoleAssignments) { if (rgRoleAssignment.principalId() != null && 
rgRoleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions.assertTrue(found, "Resource group should have a role assignment with virtual machine MSI principal"); } @Test public void canSetMSIOnNewVMWithMultipleRoleAssignments() throws Exception { String storageAccountName = generateRandomResourceName("javacsrg", 15); StorageAccount storageAccount = storageManager .storageAccounts() .define(storageAccountName) .withRegion(Region.US_EAST2) .withNewResourceGroup(rgName) .create(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(storageAccount.resourceGroupName()); VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessTo(resourceGroup.id(), BuiltInRole.CONTRIBUTOR) .withSystemAssignedIdentityBasedAccessTo(storageAccount.id(), BuiltInRole.CONTRIBUTOR) .create(); PagedIterable<RoleAssignment> rgRoleAssignments = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments); boolean found = false; for (RoleAssignment roleAssignment : rgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions.assertTrue(found, "Resource group should have a role assignment with virtual machine MSI principal"); PagedIterable<RoleAssignment> 
stgRoleAssignments = authorizationManager.roleAssignments().listByScope(storageAccount.id()); Assertions.assertNotNull(stgRoleAssignments); found = false; for (RoleAssignment roleAssignment : stgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertTrue(found, "Storage account should have a role assignment with virtual machine MSI principal"); } @Test @DoNotRecord(skipInPlayback = true) public void canSetMSIOnExistingVMWithRoleAssignments() throws Exception { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withSystemAssignedManagedServiceIdentity() .create(); Assertions.assertNotNull(virtualMachine); Assertions.assertNotNull(virtualMachine.innerModel()); Assertions.assertTrue(virtualMachine.isManagedServiceIdentityEnabled()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityTenantId()); Assertions.assertNotNull(virtualMachine.managedServiceIdentityType()); Assertions.assertTrue(virtualMachine.managedServiceIdentityType().equals(ResourceIdentityType.SYSTEM_ASSIGNED)); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(virtualMachine.resourceGroupName()); PagedIterable<RoleAssignment> rgRoleAssignments1 = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments1); boolean 
found = false; for (RoleAssignment roleAssignment : rgRoleAssignments1) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertFalse(found, "Resource group should not have a role assignment with virtual machine MSI principal"); virtualMachine .update() .withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR) .apply(); PagedIterable<RoleAssignment> roleAssignments2 = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(roleAssignments2); for (RoleAssignment roleAssignment : roleAssignments2) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions.assertTrue(found, "Resource group should have a role assignment with virtual machine MSI principal"); } private static Integer objectToInteger(Object obj) { Integer result = null; if (obj != null) { if (obj instanceof Integer) { result = (Integer) obj; } else { result = Integer.valueOf((String) obj); } } return result; } }
class VirtualMachineManagedServiceIdentityOperationsTests extends ComputeManagementTest { private String rgName = ""; private final Region region = Region.US_EAST; private final String vmName = "javavm"; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { resourceManager.resourceGroups().beginDeleteByName(rgName); } @Test @DoNotRecord(skipInPlayback = true) @Test public void canSetMSIOnNewVMWithRoleAssignedToCurrentResourceGroup() throws Exception { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR) .create(); Assertions.assertNotNull(virtualMachine); Assertions.assertNotNull(virtualMachine.innerModel()); Assertions.assertTrue(virtualMachine.isManagedServiceIdentityEnabled()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityTenantId()); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(virtualMachine.resourceGroupName()); PagedIterable<RoleAssignment> rgRoleAssignments = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); boolean found = false; for (RoleAssignment rgRoleAssignment : rgRoleAssignments) { if (rgRoleAssignment.principalId() != null && 
rgRoleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions.assertTrue(found, "Resource group should have a role assignment with virtual machine MSI principal"); } @Test public void canSetMSIOnNewVMWithMultipleRoleAssignments() throws Exception { String storageAccountName = generateRandomResourceName("javacsrg", 15); StorageAccount storageAccount = storageManager .storageAccounts() .define(storageAccountName) .withRegion(Region.US_EAST2) .withNewResourceGroup(rgName) .create(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(storageAccount.resourceGroupName()); VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessTo(resourceGroup.id(), BuiltInRole.CONTRIBUTOR) .withSystemAssignedIdentityBasedAccessTo(storageAccount.id(), BuiltInRole.CONTRIBUTOR) .create(); PagedIterable<RoleAssignment> rgRoleAssignments = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments); boolean found = false; for (RoleAssignment roleAssignment : rgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions.assertTrue(found, "Resource group should have a role assignment with virtual machine MSI principal"); PagedIterable<RoleAssignment> 
stgRoleAssignments = authorizationManager.roleAssignments().listByScope(storageAccount.id()); Assertions.assertNotNull(stgRoleAssignments); found = false; for (RoleAssignment roleAssignment : stgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertTrue(found, "Storage account should have a role assignment with virtual machine MSI principal"); } @Test @DoNotRecord(skipInPlayback = true) public void canSetMSIOnExistingVMWithRoleAssignments() throws Exception { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withSystemAssignedManagedServiceIdentity() .create(); Assertions.assertNotNull(virtualMachine); Assertions.assertNotNull(virtualMachine.innerModel()); Assertions.assertTrue(virtualMachine.isManagedServiceIdentityEnabled()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId()); Assertions.assertNotNull(virtualMachine.systemAssignedManagedServiceIdentityTenantId()); Assertions.assertNotNull(virtualMachine.managedServiceIdentityType()); Assertions.assertTrue(virtualMachine.managedServiceIdentityType().equals(ResourceIdentityType.SYSTEM_ASSIGNED)); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(virtualMachine.resourceGroupName()); PagedIterable<RoleAssignment> rgRoleAssignments1 = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments1); boolean 
found = false; for (RoleAssignment roleAssignment : rgRoleAssignments1) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertFalse(found, "Resource group should not have a role assignment with virtual machine MSI principal"); virtualMachine .update() .withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR) .apply(); PagedIterable<RoleAssignment> roleAssignments2 = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(roleAssignments2); for (RoleAssignment roleAssignment : roleAssignments2) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachine.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions.assertTrue(found, "Resource group should have a role assignment with virtual machine MSI principal"); } private static Integer objectToInteger(Object obj) { Integer result = null; if (obj != null) { if (obj instanceof Integer) { result = (Integer) obj; } else { result = Integer.valueOf((String) obj); } } return result; } }
Review whether a `@SuppressWarnings` annotation is required, and if so whether it should be placed on the enclosing method or narrowed to just the cast expression.
protected AzureUser azureCliSignedInUser() { AzureUser azureCliUser = new AzureUser(testResourceNamer); if (!isPlaybackMode()) { String azCommand = "az ad signed-in-user show --output json"; final Pattern windowsProcessErrorMessage = Pattern.compile("'azd?' is not recognized"); final Pattern shProcessErrorMessage = Pattern.compile("azd?:.*not found"); try { String starter; String switcher; if (IdentityUtil.isWindowsPlatform()) { starter = "cmd.exe"; switcher = "/c"; } else { starter = "/bin/sh"; switcher = "-c"; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); builder.redirectInput(ProcessBuilder.Redirect.from(IdentityUtil.NULL_FILE)); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (windowsProcessErrorMessage.matcher(line).find() || shProcessErrorMessage.matcher(line).find()) { throw LOGGER.logExceptionAsError(new RuntimeException("AzureCliCredential authentication unavailable. Azure CLI not installed." 
+ "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { if (processOutput.contains("az login") || processOutput.contains("az account set")) { throw LOGGER.logExceptionAsError(new RuntimeException("AzureCliCredential authentication unavailable. Azure CLI not installed." + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException("get Azure CLI current signed-in user failed", null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Get Azure CLI signed-in user => A response was received from Azure CLI, deserializing the" + " response into an signed-in user."); try (JsonReader reader = JsonProviders.createReader(processOutput)) { Map<String, Object> signedInUserInfo = reader.readMap(JsonReader::readUntyped); String userPrincipalName = (String) signedInUserInfo.get("userPrincipalName"); String id = (String) signedInUserInfo.get("id"); azureCliUser = new AzureUser(testResourceNamer, id, userPrincipalName); } } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(Exceptions.propagate(e)); } } return azureCliUser; }
// NOTE(review): orphaned fragment — this statement duplicates a line inside
// azureCliSignedInUser() above/below and is not part of any definition here;
// presumably an extraction artifact. Confirm and remove if so.
String userPrincipalName = (String) signedInUserInfo.get("userPrincipalName");
protected AzureUser azureCliSignedInUser() { AzureUser azureCliUser = new AzureUser(testResourceNamer); if (!isPlaybackMode()) { String azCommand = "az ad signed-in-user show --output json"; final Pattern windowsProcessErrorMessage = Pattern.compile("'azd?' is not recognized"); final Pattern shProcessErrorMessage = Pattern.compile("azd?:.*not found"); try { String starter; String switcher; if (IdentityUtil.isWindowsPlatform()) { starter = "cmd.exe"; switcher = "/c"; } else { starter = "/bin/sh"; switcher = "-c"; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); builder.redirectInput(ProcessBuilder.Redirect.from(IdentityUtil.NULL_FILE)); builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (windowsProcessErrorMessage.matcher(line).find() || shProcessErrorMessage.matcher(line).find()) { throw LOGGER.logExceptionAsError(new RuntimeException("AzureCliCredential authentication unavailable. Azure CLI not installed." + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { if (processOutput.contains("az login") || processOutput.contains("az account set")) { throw LOGGER.logExceptionAsError(new RuntimeException("AzureCliCredential authentication unavailable. Azure CLI not installed." 
+ "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException("get Azure CLI current signed-in user failed", null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Get Azure CLI signed-in user => A response was received from Azure CLI, deserializing the" + " response into an signed-in user."); try (JsonReader reader = JsonProviders.createReader(processOutput)) { Map<String, Object> signedInUserInfo = reader.readMap(JsonReader::readUntyped); String userPrincipalName = (String) signedInUserInfo.get("userPrincipalName"); String id = (String) signedInUserInfo.get("id"); azureCliUser = new AzureUser(testResourceNamer, id, userPrincipalName); } } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(Exceptions.propagate(e)); } } return azureCliUser; }
// Base class for Azure Resource Manager test-proxy tests. Responsibilities visible below:
// builds the HTTP pipeline for playback (mock credential, proxy playback client) or
// record/live mode (DefaultAzureCredential, optional record policy, debug logging, system/JVM
// proxy detection); registers body/header/URL sanitizers that redact subscription IDs, keys,
// passwords and connection strings; and installs a JUnit InvocationInterceptor that enforces a
// 60-second upper-bound timeout per @Test method, applied in PLAYBACK mode only (live/record
// invocations proceed without the preemptive timeout).
// NOTE(review): several string literals in this span (PLAYBACK_URI_BASE, LOGGER.error messages)
// were truncated or split across physical lines by comment-stripping during extraction — the
// code below is preserved byte-for-byte as found; restore the original literals from the
// upstream file before compiling.
class ResourceManagerTestProxyTestBase extends TestProxyTestBase { private static final String ZERO_UUID = "00000000-0000-0000-0000-000000000000"; private static final String SUBSCRIPTION_ID_REGEX = "(?<=/subscriptions/)([^/?]+)"; private static final String ZERO_SUBSCRIPTION = ZERO_UUID; private static final String ZERO_TENANT = ZERO_UUID; private static final String PLAYBACK_URI_BASE = "https: private static final String AZURE_AUTH_LOCATION = "AZURE_AUTH_LOCATION"; private static final String AZURE_TEST_LOG_LEVEL = "AZURE_TEST_LOG_LEVEL"; private static final String HTTPS_PROXY_HOST = "https.proxyHost"; private static final String HTTPS_PROXY_PORT = "https.proxyPort"; private static final String HTTP_PROXY_HOST = "http.proxyHost"; private static final String HTTP_PROXY_PORT = "http.proxyPort"; private static final String USE_SYSTEM_PROXY = "java.net.useSystemProxies"; private static final String VALUE_TRUE = "true"; private static final String PLAYBACK_URI = PLAYBACK_URI_BASE + "1234"; private static final AzureProfile PLAYBACK_PROFILE = new AzureProfile( ZERO_TENANT, ZERO_SUBSCRIPTION, new AzureEnvironment(Arrays.stream(AzureEnvironment.Endpoint.values()) .collect(Collectors.toMap(AzureEnvironment.Endpoint::identifier, endpoint -> PLAYBACK_URI))) ); private static final OutputStream EMPTY_OUTPUT_STREAM = new OutputStream() { @Override public void write(int b) { } @Override public void write(byte[] b) { } @Override public void write(byte[] b, int off, int len) { } }; /** * Redacted value. */ protected static final String REDACTED_VALUE = "REDACTED"; private static final ClientLogger LOGGER = new ClientLogger(ResourceManagerTestProxyTestBase.class); private AzureProfile testProfile; private boolean isSkipInPlayback; private final List<TestProxySanitizer> sanitizers = new ArrayList<>(); /** * Sets upper bound execution timeout for each @Test method. 
* {@link org.junit.jupiter.api.Timeout} annotation on test methods will only narrow the timeout, not affecting the upper * bound. */ @RegisterExtension final PlaybackTimeoutInterceptor playbackTimeoutInterceptor = new PlaybackTimeoutInterceptor(() -> Duration.ofSeconds(60)); /** * Initializes ResourceManagerTestProxyTestBase class. */ protected ResourceManagerTestProxyTestBase() { } /** * Generates a random resource name. * * @param prefix Prefix for the resource name. * @param maxLen Maximum length of the resource name. * @return A randomly generated resource name with a given prefix and maximum length. */ protected String generateRandomResourceName(String prefix, int maxLen) { return testResourceNamer.randomName(prefix, maxLen); } /** * Generates a random UUID. * @return A randomly generated UUID. */ protected String generateRandomUuid() { return testResourceNamer.randomUuid(); } /** * Generates a random password. * @return random password */ public static String password() { String password = new ResourceNamer("").randomName("Pa5$", 12); LOGGER.info("Password: {}", password); return password; } private static String sshPublicKey; /** * Generates an SSH public key. 
* @return an SSH public key */ public static String sshPublicKey() { if (sshPublicKey == null) { try { KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA"); keyGen.initialize(1024); KeyPair pair = keyGen.generateKeyPair(); PublicKey publicKey = pair.getPublic(); RSAPublicKey rsaPublicKey = (RSAPublicKey) publicKey; ByteArrayOutputStream byteOs = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(byteOs); dos.writeInt("ssh-rsa".getBytes(StandardCharsets.US_ASCII).length); dos.write("ssh-rsa".getBytes(StandardCharsets.US_ASCII)); dos.writeInt(rsaPublicKey.getPublicExponent().toByteArray().length); dos.write(rsaPublicKey.getPublicExponent().toByteArray()); dos.writeInt(rsaPublicKey.getModulus().toByteArray().length); dos.write(rsaPublicKey.getModulus().toByteArray()); String publicKeyEncoded = new String(Base64.getEncoder().encode(byteOs.toByteArray()), StandardCharsets.US_ASCII); sshPublicKey = "ssh-rsa " + publicKeyEncoded; } catch (NoSuchAlgorithmException | IOException e) { throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e)); } } return sshPublicKey; } private static final Pattern SUBSCRIPTION_ID_PATTERN = Pattern.compile(SUBSCRIPTION_ID_REGEX); /** * Asserts that the resource ID is same. * * @param expected the expected resource ID. * @param actual the actual resource ID. */ protected void assertResourceIdEquals(String expected, String actual) { String sanitizedExpected = SUBSCRIPTION_ID_PATTERN.matcher(expected).replaceAll(ZERO_UUID); String sanitizedActual = SUBSCRIPTION_ID_PATTERN.matcher(actual).replaceAll(ZERO_UUID); Assertions.assertTrue(sanitizedExpected.equalsIgnoreCase(sanitizedActual), String.format("expected: %s but was: %s", expected, actual)); } /** * Loads a client ID from file. * * @return A client ID loaded from a file. 
*/ protected String clientIdFromFile() { String clientId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_CLIENT_ID); return testResourceNamer.recordValueFromConfig(clientId); } /** * Return current Azure CLI signed-in user's userPrincipalName. * * @return current Azure CLI signed-in user. */ private static String getSafeWorkingDirectory() { if (IdentityUtil.isWindowsPlatform()) { String windowsSystemRoot = System.getenv("SystemRoot"); return CoreUtils.isNullOrEmpty(windowsSystemRoot) ? null : windowsSystemRoot + "\\system32"; } else { return "/bin/"; } } /** * Gets the test profile. * @return The test profile. */ protected AzureProfile profile() { return testProfile; } /** * Checks whether test mode is {@link TestMode * @return Whether the test mode is {@link TestMode */ protected boolean isPlaybackMode() { return getTestMode() == TestMode.PLAYBACK; } /** * Checks whether test should be skipped in playback. * @return Whether the test should be skipped in playback. */ protected boolean skipInPlayback() { if (isPlaybackMode()) { isSkipInPlayback = true; } return isSkipInPlayback; } @Override protected void beforeTest() { TokenCredential credential; HttpPipeline httpPipeline; String logLevel = Configuration.getGlobalConfiguration().get(AZURE_TEST_LOG_LEVEL); HttpLogDetailLevel httpLogDetailLevel; try { httpLogDetailLevel = HttpLogDetailLevel.valueOf(logLevel); } catch (Exception e) { if (isPlaybackMode()) { httpLogDetailLevel = HttpLogDetailLevel.NONE; LOGGER.error("Environment variable '{}' has not been set yet. Using 'NONE' for PLAYBACK.", AZURE_TEST_LOG_LEVEL); } else { httpLogDetailLevel = HttpLogDetailLevel.BODY_AND_HEADERS; LOGGER.error("Environment variable '{}' has not been set yet. 
Using 'BODY_AND_HEADERS' for RECORD/LIVE.", AZURE_TEST_LOG_LEVEL); } } if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { try { System.setOut(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name())); System.setErr(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name())); } catch (UnsupportedEncodingException e) { } } if (isPlaybackMode()) { testProfile = PLAYBACK_PROFILE; List<HttpPipelinePolicy> policies = new ArrayList<>(); httpPipeline = buildHttpPipeline( new MockTokenCredential(), testProfile, new HttpLogOptions().setLogLevel(httpLogDetailLevel), policies, interceptorManager.getPlaybackClient()); if (!testContextManager.doNotRecordTest()) { interceptorManager.addMatchers(Collections.singletonList(new CustomMatcher().setIgnoredQueryParameters(Arrays.asList("api-version")).setExcludedHeaders(Arrays.asList("If-Match")))); addSanitizers(); removeSanitizers(); } } else { Configuration configuration = Configuration.getGlobalConfiguration(); String tenantId = Objects.requireNonNull( configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID), "'AZURE_TENANT_ID' environment variable cannot be null."); String subscriptionId = Objects.requireNonNull( configuration.get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID), "'AZURE_SUBSCRIPTION_ID' environment variable cannot be null."); credential = new DefaultAzureCredentialBuilder() .authorityHost(AzureEnvironment.AZURE.getActiveDirectoryEndpoint()) .build(); testProfile = new AzureProfile(tenantId, subscriptionId, AzureEnvironment.AZURE); List<HttpPipelinePolicy> policies = new ArrayList<>(); if (interceptorManager.isRecordMode() && !testContextManager.doNotRecordTest()) { policies.add(this.interceptorManager.getRecordPolicy()); addSanitizers(); removeSanitizers(); } if (httpLogDetailLevel == HttpLogDetailLevel.BODY_AND_HEADERS) { policies.add(new HttpDebugLoggingPolicy()); httpLogDetailLevel = HttpLogDetailLevel.NONE; } httpPipeline = buildHttpPipeline( credential, testProfile, new 
HttpLogOptions().setLogLevel(httpLogDetailLevel), policies, generateHttpClientWithProxy(null, null)); } initializeClients(httpPipeline, testProfile); } /** * Generates an {@link HttpClient} with a proxy. * * @param clientBuilder The HttpClient builder. * @param proxyOptions The proxy. * @return An HttpClient with a proxy. */ protected HttpClient generateHttpClientWithProxy(NettyAsyncHttpClientBuilder clientBuilder, ProxyOptions proxyOptions) { if (clientBuilder == null) { clientBuilder = new NettyAsyncHttpClientBuilder(); } if (proxyOptions != null) { clientBuilder.proxy(proxyOptions); } else { try { System.setProperty(USE_SYSTEM_PROXY, VALUE_TRUE); List<Proxy> proxies = ProxySelector.getDefault().select(new URI(AzureEnvironment.AZURE.getResourceManagerEndpoint())); if (!proxies.isEmpty()) { for (Proxy proxy : proxies) { if (proxy.address() instanceof InetSocketAddress) { String host = ((InetSocketAddress) proxy.address()).getHostName(); int port = ((InetSocketAddress) proxy.address()).getPort(); switch (proxy.type()) { case HTTP: return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))).build(); case SOCKS: return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.SOCKS5, new InetSocketAddress(host, port))).build(); default: } } } } String host = null; int port = 0; if (System.getProperty(HTTPS_PROXY_HOST) != null && System.getProperty(HTTPS_PROXY_PORT) != null) { host = System.getProperty(HTTPS_PROXY_HOST); port = Integer.parseInt(System.getProperty(HTTPS_PROXY_PORT)); } else if (System.getProperty(HTTP_PROXY_HOST) != null && System.getProperty(HTTP_PROXY_PORT) != null) { host = System.getProperty(HTTP_PROXY_HOST); port = Integer.parseInt(System.getProperty(HTTP_PROXY_PORT)); } if (host != null) { clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))); } } catch (URISyntaxException ignored) { } } return clientBuilder.build(); } @Override protected void afterTest() { if 
(!isSkipInPlayback) { cleanUpResources(); } } /** * Sets sdk context when running the tests * * @param internalContext the internal runtime context * @param objects the manager classes to change internal context * @param <T> the type of internal context * @throws RuntimeException when field cannot be found or set. */ protected <T> void setInternalContext(T internalContext, Object... objects) { try { for (Object obj : objects) { for (final Field field : obj.getClass().getSuperclass().getDeclaredFields()) { if (field.getName().equals("resourceManager")) { setAccessible(field); Field context = field.get(obj).getClass().getDeclaredField("internalContext"); setAccessible(context); context.set(field.get(obj), internalContext); } } for (Field field : obj.getClass().getDeclaredFields()) { if (field.getName().equals("internalContext")) { setAccessible(field); field.set(obj, internalContext); } else if (field.getName().contains("Manager")) { setAccessible(field); setInternalContext(internalContext, field.get(obj)); } } } } catch (IllegalAccessException | NoSuchFieldException ex) { throw LOGGER.logExceptionAsError(new RuntimeException(ex)); } } private void setAccessible(final AccessibleObject accessibleObject) { Runnable runnable = () -> accessibleObject.setAccessible(true); runnable.run(); } /** * Builds the manager with provided http pipeline and profile in general manner. * * @param manager the class of the manager * @param httpPipeline the http pipeline * @param profile the azure profile * @param <T> the type of the manager * @return the manager instance * @throws RuntimeException when field cannot be found or set. 
*/ protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) { try { Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass()); setAccessible(constructor); return constructor.newInstance(httpPipeline, profile); } catch (ReflectiveOperationException ex) { throw LOGGER.logExceptionAsError(new RuntimeException(ex)); } } /** * Builds an HttpPipeline. * * @param credential The credentials to use in the pipeline. * @param profile The AzureProfile to use in the pipeline. * @param httpLogOptions The HTTP logging options to use in the pipeline. * @param policies Additional policies to use in the pipeline. * @param httpClient The HttpClient to use in the pipeline. * @return A new constructed HttpPipeline. */ protected abstract HttpPipeline buildHttpPipeline( TokenCredential credential, AzureProfile profile, HttpLogOptions httpLogOptions, List<HttpPipelinePolicy> policies, HttpClient httpClient); /** * Initializes service clients used in testing. * * @param httpPipeline The HttpPipeline to use in the clients. * @param profile The AzureProfile to use in the clients. */ protected abstract void initializeClients(HttpPipeline httpPipeline, AzureProfile profile); /** * Cleans up resources. 
*/ protected abstract void cleanUpResources(); private void addSanitizers() { List<TestProxySanitizer> sanitizers = new ArrayList<>(Arrays.asList( new TestProxySanitizer(SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.URL), new TestProxySanitizer("(?<=%2Fsubscriptions%2F)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL), new TestProxySanitizer("Retry-After", null, "0", TestProxySanitizerType.HEADER), new TestProxySanitizer("$..id", SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secretText", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..keys[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..adminPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..Password", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..accessSAS", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$.properties.osProfile.customData", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..administratorLoginPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..hubDatabasePassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..aliasPrimaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..aliasSecondaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..primaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secondaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..primaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secondaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new 
TestProxySanitizer("$..primaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secondaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..passwords[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secret", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$.properties.siteConfig.machineKey.decryptionKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("(?:AccountKey=)(?<accountKey>.*?)(?:;)", REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("accountKey"), new TestProxySanitizer("$.properties.WEBSITE_AUTH_ENCRYPTION_KEY", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$.properties.DOCKER_REGISTRY_SERVER_PASSWORD", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY) )); sanitizers.addAll(this.sanitizers); interceptorManager.addSanitizers(sanitizers); } private void removeSanitizers() { interceptorManager.removeSanitizers("AZSDK2003", "AZSDK2030", "AZSDK3430", "AZSDK3493"); } /** * Adds test proxy sanitizers. * <p> * Recommend to call this API in subclass constructor. * * @param sanitizers the test proxy sanitizers. */ protected void addSanitizers(TestProxySanitizer... 
sanitizers) { this.sanitizers.addAll(Arrays.asList(sanitizers)); } private final class PlaybackTimeoutInterceptor implements InvocationInterceptor { private final Duration duration; private PlaybackTimeoutInterceptor(Supplier<Duration> timeoutSupplier) { Objects.requireNonNull(timeoutSupplier); this.duration = timeoutSupplier.get(); } @Override public void interceptTestMethod(Invocation<Void> invocation, ReflectiveInvocationContext<Method> invocationContext, ExtensionContext extensionContext) throws Throwable { if (isPlaybackMode()) { Assertions.assertTimeoutPreemptively(duration, invocation::proceed); } else { invocation.proceed(); } } } }
class ResourceManagerTestProxyTestBase extends TestProxyTestBase { private static final String ZERO_UUID = "00000000-0000-0000-0000-000000000000"; private static final String SUBSCRIPTION_ID_REGEX = "(?<=/subscriptions/)([^/?]+)"; private static final String ZERO_SUBSCRIPTION = ZERO_UUID; private static final String ZERO_TENANT = ZERO_UUID; private static final String PLAYBACK_URI_BASE = "https: private static final String AZURE_AUTH_LOCATION = "AZURE_AUTH_LOCATION"; private static final String AZURE_TEST_LOG_LEVEL = "AZURE_TEST_LOG_LEVEL"; private static final String HTTPS_PROXY_HOST = "https.proxyHost"; private static final String HTTPS_PROXY_PORT = "https.proxyPort"; private static final String HTTP_PROXY_HOST = "http.proxyHost"; private static final String HTTP_PROXY_PORT = "http.proxyPort"; private static final String USE_SYSTEM_PROXY = "java.net.useSystemProxies"; private static final String VALUE_TRUE = "true"; private static final String PLAYBACK_URI = PLAYBACK_URI_BASE + "1234"; private static final AzureProfile PLAYBACK_PROFILE = new AzureProfile( ZERO_TENANT, ZERO_SUBSCRIPTION, new AzureEnvironment(Arrays.stream(AzureEnvironment.Endpoint.values()) .collect(Collectors.toMap(AzureEnvironment.Endpoint::identifier, endpoint -> PLAYBACK_URI))) ); private static final OutputStream EMPTY_OUTPUT_STREAM = new OutputStream() { @Override public void write(int b) { } @Override public void write(byte[] b) { } @Override public void write(byte[] b, int off, int len) { } }; /** * Redacted value. */ protected static final String REDACTED_VALUE = "REDACTED"; private static final ClientLogger LOGGER = new ClientLogger(ResourceManagerTestProxyTestBase.class); private AzureProfile testProfile; private boolean isSkipInPlayback; private final List<TestProxySanitizer> sanitizers = new ArrayList<>(); /** * Sets upper bound execution timeout for each @Test method. 
* {@link org.junit.jupiter.api.Timeout} annotation on test methods will only narrow the timeout, not affecting the upper * bound. */ @RegisterExtension final PlaybackTimeoutInterceptor playbackTimeoutInterceptor = new PlaybackTimeoutInterceptor(() -> Duration.ofSeconds(60)); /** * Initializes ResourceManagerTestProxyTestBase class. */ protected ResourceManagerTestProxyTestBase() { } /** * Generates a random resource name. * * @param prefix Prefix for the resource name. * @param maxLen Maximum length of the resource name. * @return A randomly generated resource name with a given prefix and maximum length. */ protected String generateRandomResourceName(String prefix, int maxLen) { return testResourceNamer.randomName(prefix, maxLen); } /** * Generates a random UUID. * @return A randomly generated UUID. */ protected String generateRandomUuid() { return testResourceNamer.randomUuid(); } /** * Generates a random password. * @return random password */ public static String password() { String password = new ResourceNamer("").randomName("Pa5$", 12); LOGGER.info("Password: {}", password); return password; } private static String sshPublicKey; /** * Generates an SSH public key. 
* @return an SSH public key */ public static String sshPublicKey() { if (sshPublicKey == null) { try { KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA"); keyGen.initialize(1024); KeyPair pair = keyGen.generateKeyPair(); PublicKey publicKey = pair.getPublic(); RSAPublicKey rsaPublicKey = (RSAPublicKey) publicKey; ByteArrayOutputStream byteOs = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(byteOs); dos.writeInt("ssh-rsa".getBytes(StandardCharsets.US_ASCII).length); dos.write("ssh-rsa".getBytes(StandardCharsets.US_ASCII)); dos.writeInt(rsaPublicKey.getPublicExponent().toByteArray().length); dos.write(rsaPublicKey.getPublicExponent().toByteArray()); dos.writeInt(rsaPublicKey.getModulus().toByteArray().length); dos.write(rsaPublicKey.getModulus().toByteArray()); String publicKeyEncoded = new String(Base64.getEncoder().encode(byteOs.toByteArray()), StandardCharsets.US_ASCII); sshPublicKey = "ssh-rsa " + publicKeyEncoded; } catch (NoSuchAlgorithmException | IOException e) { throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e)); } } return sshPublicKey; } private static final Pattern SUBSCRIPTION_ID_PATTERN = Pattern.compile(SUBSCRIPTION_ID_REGEX); /** * Asserts that the resource ID is same. * * @param expected the expected resource ID. * @param actual the actual resource ID. */ protected void assertResourceIdEquals(String expected, String actual) { String sanitizedExpected = SUBSCRIPTION_ID_PATTERN.matcher(expected).replaceAll(ZERO_UUID); String sanitizedActual = SUBSCRIPTION_ID_PATTERN.matcher(actual).replaceAll(ZERO_UUID); Assertions.assertTrue(sanitizedExpected.equalsIgnoreCase(sanitizedActual), String.format("expected: %s but was: %s", expected, actual)); } /** * Loads a client ID from file. * * @return A client ID loaded from a file. 
*/ protected String clientIdFromFile() { String clientId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_CLIENT_ID); return testResourceNamer.recordValueFromConfig(clientId); } /** * Return current Azure CLI signed-in user's userPrincipalName. * * @return current Azure CLI signed-in user. */ private static String getSafeWorkingDirectory() { if (IdentityUtil.isWindowsPlatform()) { String windowsSystemRoot = System.getenv("SystemRoot"); return CoreUtils.isNullOrEmpty(windowsSystemRoot) ? null : windowsSystemRoot + "\\system32"; } else { return "/bin/"; } } /** * Gets the test profile. * @return The test profile. */ protected AzureProfile profile() { return testProfile; } /** * Checks whether test mode is {@link TestMode * @return Whether the test mode is {@link TestMode */ protected boolean isPlaybackMode() { return getTestMode() == TestMode.PLAYBACK; } /** * Checks whether test should be skipped in playback. * @return Whether the test should be skipped in playback. */ protected boolean skipInPlayback() { if (isPlaybackMode()) { isSkipInPlayback = true; } return isSkipInPlayback; } @Override protected void beforeTest() { TokenCredential credential; HttpPipeline httpPipeline; String logLevel = Configuration.getGlobalConfiguration().get(AZURE_TEST_LOG_LEVEL); HttpLogDetailLevel httpLogDetailLevel; try { httpLogDetailLevel = HttpLogDetailLevel.valueOf(logLevel); } catch (Exception e) { if (isPlaybackMode()) { httpLogDetailLevel = HttpLogDetailLevel.NONE; LOGGER.error("Environment variable '{}' has not been set yet. Using 'NONE' for PLAYBACK.", AZURE_TEST_LOG_LEVEL); } else { httpLogDetailLevel = HttpLogDetailLevel.BODY_AND_HEADERS; LOGGER.error("Environment variable '{}' has not been set yet. 
Using 'BODY_AND_HEADERS' for RECORD/LIVE.", AZURE_TEST_LOG_LEVEL); } } if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { try { System.setOut(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name())); System.setErr(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name())); } catch (UnsupportedEncodingException e) { } } if (isPlaybackMode()) { testProfile = PLAYBACK_PROFILE; List<HttpPipelinePolicy> policies = new ArrayList<>(); httpPipeline = buildHttpPipeline( new MockTokenCredential(), testProfile, new HttpLogOptions().setLogLevel(httpLogDetailLevel), policies, interceptorManager.getPlaybackClient()); if (!testContextManager.doNotRecordTest()) { interceptorManager.addMatchers(Collections.singletonList(new CustomMatcher().setIgnoredQueryParameters(Arrays.asList("api-version")).setExcludedHeaders(Arrays.asList("If-Match")))); addSanitizers(); removeSanitizers(); } } else { Configuration configuration = Configuration.getGlobalConfiguration(); String tenantId = Objects.requireNonNull( configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID), "'AZURE_TENANT_ID' environment variable cannot be null."); String subscriptionId = Objects.requireNonNull( configuration.get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID), "'AZURE_SUBSCRIPTION_ID' environment variable cannot be null."); credential = new DefaultAzureCredentialBuilder() .authorityHost(AzureEnvironment.AZURE.getActiveDirectoryEndpoint()) .build(); testProfile = new AzureProfile(tenantId, subscriptionId, AzureEnvironment.AZURE); List<HttpPipelinePolicy> policies = new ArrayList<>(); if (interceptorManager.isRecordMode() && !testContextManager.doNotRecordTest()) { policies.add(this.interceptorManager.getRecordPolicy()); addSanitizers(); removeSanitizers(); } if (httpLogDetailLevel == HttpLogDetailLevel.BODY_AND_HEADERS) { policies.add(new HttpDebugLoggingPolicy()); httpLogDetailLevel = HttpLogDetailLevel.NONE; } httpPipeline = buildHttpPipeline( credential, testProfile, new 
HttpLogOptions().setLogLevel(httpLogDetailLevel), policies, generateHttpClientWithProxy(null, null)); } initializeClients(httpPipeline, testProfile); } /** * Generates an {@link HttpClient} with a proxy. * * @param clientBuilder The HttpClient builder. * @param proxyOptions The proxy. * @return An HttpClient with a proxy. */ protected HttpClient generateHttpClientWithProxy(NettyAsyncHttpClientBuilder clientBuilder, ProxyOptions proxyOptions) { if (clientBuilder == null) { clientBuilder = new NettyAsyncHttpClientBuilder(); } if (proxyOptions != null) { clientBuilder.proxy(proxyOptions); } else { try { System.setProperty(USE_SYSTEM_PROXY, VALUE_TRUE); List<Proxy> proxies = ProxySelector.getDefault().select(new URI(AzureEnvironment.AZURE.getResourceManagerEndpoint())); if (!proxies.isEmpty()) { for (Proxy proxy : proxies) { if (proxy.address() instanceof InetSocketAddress) { String host = ((InetSocketAddress) proxy.address()).getHostName(); int port = ((InetSocketAddress) proxy.address()).getPort(); switch (proxy.type()) { case HTTP: return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))).build(); case SOCKS: return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.SOCKS5, new InetSocketAddress(host, port))).build(); default: } } } } String host = null; int port = 0; if (System.getProperty(HTTPS_PROXY_HOST) != null && System.getProperty(HTTPS_PROXY_PORT) != null) { host = System.getProperty(HTTPS_PROXY_HOST); port = Integer.parseInt(System.getProperty(HTTPS_PROXY_PORT)); } else if (System.getProperty(HTTP_PROXY_HOST) != null && System.getProperty(HTTP_PROXY_PORT) != null) { host = System.getProperty(HTTP_PROXY_HOST); port = Integer.parseInt(System.getProperty(HTTP_PROXY_PORT)); } if (host != null) { clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))); } } catch (URISyntaxException ignored) { } } return clientBuilder.build(); } @Override protected void afterTest() { if 
(!isSkipInPlayback) { cleanUpResources(); } } /** * Sets sdk context when running the tests * * @param internalContext the internal runtime context * @param objects the manager classes to change internal context * @param <T> the type of internal context * @throws RuntimeException when field cannot be found or set. */ protected <T> void setInternalContext(T internalContext, Object... objects) { try { for (Object obj : objects) { for (final Field field : obj.getClass().getSuperclass().getDeclaredFields()) { if (field.getName().equals("resourceManager")) { setAccessible(field); Field context = field.get(obj).getClass().getDeclaredField("internalContext"); setAccessible(context); context.set(field.get(obj), internalContext); } } for (Field field : obj.getClass().getDeclaredFields()) { if (field.getName().equals("internalContext")) { setAccessible(field); field.set(obj, internalContext); } else if (field.getName().contains("Manager")) { setAccessible(field); setInternalContext(internalContext, field.get(obj)); } } } } catch (IllegalAccessException | NoSuchFieldException ex) { throw LOGGER.logExceptionAsError(new RuntimeException(ex)); } } private void setAccessible(final AccessibleObject accessibleObject) { Runnable runnable = () -> accessibleObject.setAccessible(true); runnable.run(); } /** * Builds the manager with provided http pipeline and profile in general manner. * * @param manager the class of the manager * @param httpPipeline the http pipeline * @param profile the azure profile * @param <T> the type of the manager * @return the manager instance * @throws RuntimeException when field cannot be found or set. 
*/ protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) { try { Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass()); setAccessible(constructor); return constructor.newInstance(httpPipeline, profile); } catch (ReflectiveOperationException ex) { throw LOGGER.logExceptionAsError(new RuntimeException(ex)); } } /** * Builds an HttpPipeline. * * @param credential The credentials to use in the pipeline. * @param profile The AzureProfile to use in the pipeline. * @param httpLogOptions The HTTP logging options to use in the pipeline. * @param policies Additional policies to use in the pipeline. * @param httpClient The HttpClient to use in the pipeline. * @return A new constructed HttpPipeline. */ protected abstract HttpPipeline buildHttpPipeline( TokenCredential credential, AzureProfile profile, HttpLogOptions httpLogOptions, List<HttpPipelinePolicy> policies, HttpClient httpClient); /** * Initializes service clients used in testing. * * @param httpPipeline The HttpPipeline to use in the clients. * @param profile The AzureProfile to use in the clients. */ protected abstract void initializeClients(HttpPipeline httpPipeline, AzureProfile profile); /** * Cleans up resources. 
*/ protected abstract void cleanUpResources(); private void addSanitizers() { List<TestProxySanitizer> sanitizers = new ArrayList<>(Arrays.asList( new TestProxySanitizer(SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.URL), new TestProxySanitizer("(?<=%2Fsubscriptions%2F)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL), new TestProxySanitizer("Retry-After", null, "0", TestProxySanitizerType.HEADER), new TestProxySanitizer("$..id", SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secretText", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..keys[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..adminPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..Password", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..accessSAS", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$.properties.osProfile.customData", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..administratorLoginPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..hubDatabasePassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..aliasPrimaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..aliasSecondaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..primaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secondaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..primaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secondaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new 
TestProxySanitizer("$..primaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secondaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..passwords[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secret", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$.properties.siteConfig.machineKey.decryptionKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("(?:AccountKey=)(?<accountKey>.*?)(?:;)", REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("accountKey"), new TestProxySanitizer("$.properties.WEBSITE_AUTH_ENCRYPTION_KEY", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$.properties.DOCKER_REGISTRY_SERVER_PASSWORD", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY) )); sanitizers.addAll(this.sanitizers); interceptorManager.addSanitizers(sanitizers); } private void removeSanitizers() { interceptorManager.removeSanitizers("AZSDK2003", "AZSDK2030", "AZSDK3430", "AZSDK3493"); } /** * Adds test proxy sanitizers. * <p> * Recommend to call this API in subclass constructor. * * @param sanitizers the test proxy sanitizers. */ protected void addSanitizers(TestProxySanitizer... 
sanitizers) { this.sanitizers.addAll(Arrays.asList(sanitizers)); } private final class PlaybackTimeoutInterceptor implements InvocationInterceptor { private final Duration duration; private PlaybackTimeoutInterceptor(Supplier<Duration> timeoutSupplier) { Objects.requireNonNull(timeoutSupplier); this.duration = timeoutSupplier.get(); } @Override public void interceptTestMethod(Invocation<Void> invocation, ReflectiveInvocationContext<Method> invocationContext, ExtensionContext extensionContext) throws Throwable { if (isPlaybackMode()) { Assertions.assertTimeoutPreemptively(duration, invocation::proceed); } else { invocation.proceed(); } } } }
sooner the better. we use spotless everywhere from java agent to otel upstream. it's a handy tool for us.
private static boolean isClient(String metricName) { return metricName.contains(".client."); }
return metricName.contains(".client.");
private static boolean isClient(String metricName) { return metricName.contains(".client."); }
class MetricDataMapper { private static final ClientLogger logger = new ClientLogger(MetricDataMapper.class); private static final Set<String> OTEL_UNSTABLE_METRICS_TO_EXCLUDE = new HashSet<>(); private static final String OTEL_INSTRUMENTATION_NAME_PREFIX = "io.opentelemetry"; private static final Set<String> OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES = new HashSet<>(4); public static final AttributeKey<String> APPLICATIONINSIGHTS_INTERNAL_METRIC_NAME = AttributeKey.stringKey("applicationinsights.internal.metric_name"); private final BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer; private final boolean captureHttpServer4xxAsError; static { OTEL_UNSTABLE_METRICS_TO_EXCLUDE.add("rpc.client.duration"); OTEL_UNSTABLE_METRICS_TO_EXCLUDE.add("rpc.server.duration"); OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("http.server.request.duration"); OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("http.client.request.duration"); OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("rpc.client.duration"); OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("rpc.server.duration"); } public MetricDataMapper( BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer, boolean captureHttpServer4xxAsError) { this.telemetryInitializer = telemetryInitializer; this.captureHttpServer4xxAsError = captureHttpServer4xxAsError; } public void map(MetricData metricData, Consumer<TelemetryItem> consumer) { MetricDataType type = metricData.getType(); if (type == DOUBLE_SUM || type == DOUBLE_GAUGE || type == LONG_SUM || type == LONG_GAUGE || type == HISTOGRAM) { boolean isPreAggregatedStandardMetric = OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.contains(metricData.getName()); if (isPreAggregatedStandardMetric) { List<TelemetryItem> preAggregatedStandardMetrics = convertOtelMetricToAzureMonitorMetric(metricData, true); preAggregatedStandardMetrics.forEach(consumer::accept); } if (OTEL_UNSTABLE_METRICS_TO_EXCLUDE.contains(metricData.getName()) && 
metricData.getInstrumentationScopeInfo().getName().startsWith(OTEL_INSTRUMENTATION_NAME_PREFIX)) { return; } List<TelemetryItem> stableOtelMetrics = convertOtelMetricToAzureMonitorMetric(metricData, false); stableOtelMetrics.forEach(consumer::accept); } else { logger.warning("metric data type {} is not supported yet.", metricData.getType()); } } private List<TelemetryItem> convertOtelMetricToAzureMonitorMetric( MetricData metricData, boolean isPreAggregatedStandardMetric) { List<TelemetryItem> telemetryItems = new ArrayList<>(); for (PointData pointData : metricData.getData().getPoints()) { MetricTelemetryBuilder builder = MetricTelemetryBuilder.create(); telemetryInitializer.accept(builder, metricData.getResource()); builder.setTime(FormattedTime.offSetDateTimeFromEpochNanos(pointData.getEpochNanos())); updateMetricPointBuilder( builder, metricData, pointData, captureHttpServer4xxAsError, isPreAggregatedStandardMetric); telemetryItems.add(builder.build()); } return telemetryItems; } public static void updateMetricPointBuilder( MetricTelemetryBuilder metricTelemetryBuilder, MetricData metricData, PointData pointData, boolean captureHttpServer4xxAsError, boolean isPreAggregatedStandardMetric) { checkArgument(metricData != null, "MetricData cannot be null."); MetricPointBuilder pointBuilder = new MetricPointBuilder(); MetricDataType type = metricData.getType(); double pointDataValue; switch (type) { case LONG_SUM: case LONG_GAUGE: pointDataValue = (double) ((LongPointData) pointData).getValue(); break; case DOUBLE_SUM: case DOUBLE_GAUGE: pointDataValue = ((DoublePointData) pointData).getValue(); break; case HISTOGRAM: long histogramCount = ((HistogramPointData) pointData).getCount(); if (histogramCount <= Integer.MAX_VALUE && histogramCount >= Integer.MIN_VALUE) { pointBuilder.setCount((int) histogramCount); } HistogramPointData histogramPointData = (HistogramPointData) pointData; double min = histogramPointData.getMin(); double max = histogramPointData.getMax(); if 
(shouldConvertToMilliseconds(metricData.getName(), isPreAggregatedStandardMetric)) { min = min * 1000; max = max * 1000; } pointDataValue = histogramPointData.getSum(); pointBuilder.setMin(min); pointBuilder.setMax(max); break; case SUMMARY: case EXPONENTIAL_HISTOGRAM: default: throw new IllegalArgumentException("metric data type '" + type + "' is not supported yet"); } if (shouldConvertToMilliseconds(metricData.getName(), isPreAggregatedStandardMetric)) { pointDataValue = pointDataValue * 1000; } pointBuilder.setValue(pointDataValue); String metricName = pointData.getAttributes().get(APPLICATIONINSIGHTS_INTERNAL_METRIC_NAME); if (metricName != null) { pointBuilder.setName(metricName); } else { pointBuilder.setName(metricData.getName()); } metricTelemetryBuilder.setMetricPoint(pointBuilder); Attributes attributes = pointData.getAttributes(); if (isPreAggregatedStandardMetric) { Long statusCode = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); boolean success = isSuccess(metricData.getName(), statusCode, captureHttpServer4xxAsError); Boolean isSynthetic = attributes.get(IS_SYNTHETIC); attributes.forEach( (key, value) -> applyConnectionStringAndRoleNameOverrides( metricTelemetryBuilder, value, key.getKey())); if (isServer(metricData.getName())) { RequestExtractor.extract(metricTelemetryBuilder, statusCode, success, isSynthetic); } else if (isClient(metricData.getName())) { String dependencyType; int defaultPort; if (metricData.getName().startsWith("http")) { dependencyType = "Http"; defaultPort = getDefaultPortForHttpScheme(getStableOrOldAttribute(attributes, SemanticAttributes.URL_SCHEME, SemanticAttributes.HTTP_SCHEME)); } else { dependencyType = attributes.get(SemanticAttributes.RPC_SYSTEM); if (dependencyType == null) { dependencyType = "Unknown"; } defaultPort = Integer.MAX_VALUE; } String target = SpanDataMapper.getTargetOrDefault(attributes, defaultPort, dependencyType); 
DependencyExtractor.extract( metricTelemetryBuilder, statusCode, success, dependencyType, target, isSynthetic); } } else { MappingsBuilder mappingsBuilder = new MappingsBuilder(METRIC); mappingsBuilder.build().map(attributes, metricTelemetryBuilder); } } private static boolean shouldConvertToMilliseconds(String metricName, boolean isPreAggregatedStandardMetric) { return isPreAggregatedStandardMetric && (metricName.equals("http.server.request.duration") || metricName.equals("http.client.request.duration")); } private static boolean applyConnectionStringAndRoleNameOverrides( AbstractTelemetryBuilder telemetryBuilder, Object value, String key) { if (key.equals(AiSemanticAttributes.INTERNAL_CONNECTION_STRING.getKey()) && value instanceof String) { telemetryBuilder.setConnectionString(ConnectionString.parse((String) value)); return true; } if (key.equals(AiSemanticAttributes.INTERNAL_ROLE_NAME.getKey()) && value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_CLOUD_ROLE.toString(), (String) value); return true; } return false; } private static int getDefaultPortForHttpScheme(@Nullable String httpScheme) { if (httpScheme == null) { return Integer.MAX_VALUE; } if (httpScheme.equals("https")) { return 443; } if (httpScheme.equals("http")) { return 80; } return Integer.MAX_VALUE; } private static boolean isSuccess(String metricName, Long statusCode, boolean captureHttpServer4xxAsError) { if (statusCode == null) { return true; } if (isClient(metricName)) { return statusCode < 400; } if (isServer(metricName)) { if (captureHttpServer4xxAsError) { return statusCode < 400; } return statusCode < 500; } return false; } private static boolean isServer(String metricName) { return metricName.contains(".server."); } }
class MetricDataMapper { private static final ClientLogger logger = new ClientLogger(MetricDataMapper.class); private static final Set<String> OTEL_UNSTABLE_METRICS_TO_EXCLUDE = new HashSet<>(); private static final String OTEL_INSTRUMENTATION_NAME_PREFIX = "io.opentelemetry"; private static final Set<String> OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES = new HashSet<>(4); public static final AttributeKey<String> APPLICATIONINSIGHTS_INTERNAL_METRIC_NAME = AttributeKey.stringKey("applicationinsights.internal.metric_name"); private final BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer; private final boolean captureHttpServer4xxAsError; static { OTEL_UNSTABLE_METRICS_TO_EXCLUDE.add("rpc.client.duration"); OTEL_UNSTABLE_METRICS_TO_EXCLUDE.add("rpc.server.duration"); OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("http.server.request.duration"); OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("http.client.request.duration"); OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("rpc.client.duration"); OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("rpc.server.duration"); } public MetricDataMapper( BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer, boolean captureHttpServer4xxAsError) { this.telemetryInitializer = telemetryInitializer; this.captureHttpServer4xxAsError = captureHttpServer4xxAsError; } public void map(MetricData metricData, Consumer<TelemetryItem> consumer) { MetricDataType type = metricData.getType(); if (type == DOUBLE_SUM || type == DOUBLE_GAUGE || type == LONG_SUM || type == LONG_GAUGE || type == HISTOGRAM) { boolean isPreAggregatedStandardMetric = OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.contains(metricData.getName()); if (isPreAggregatedStandardMetric) { List<TelemetryItem> preAggregatedStandardMetrics = convertOtelMetricToAzureMonitorMetric(metricData, true); preAggregatedStandardMetrics.forEach(consumer::accept); } if (OTEL_UNSTABLE_METRICS_TO_EXCLUDE.contains(metricData.getName()) && 
metricData.getInstrumentationScopeInfo().getName().startsWith(OTEL_INSTRUMENTATION_NAME_PREFIX)) { return; } List<TelemetryItem> stableOtelMetrics = convertOtelMetricToAzureMonitorMetric(metricData, false); stableOtelMetrics.forEach(consumer::accept); } else { logger.warning("metric data type {} is not supported yet.", metricData.getType()); } } private List<TelemetryItem> convertOtelMetricToAzureMonitorMetric( MetricData metricData, boolean isPreAggregatedStandardMetric) { List<TelemetryItem> telemetryItems = new ArrayList<>(); for (PointData pointData : metricData.getData().getPoints()) { MetricTelemetryBuilder builder = MetricTelemetryBuilder.create(); telemetryInitializer.accept(builder, metricData.getResource()); builder.setTime(FormattedTime.offSetDateTimeFromEpochNanos(pointData.getEpochNanos())); updateMetricPointBuilder( builder, metricData, pointData, captureHttpServer4xxAsError, isPreAggregatedStandardMetric); telemetryItems.add(builder.build()); } return telemetryItems; } public static void updateMetricPointBuilder( MetricTelemetryBuilder metricTelemetryBuilder, MetricData metricData, PointData pointData, boolean captureHttpServer4xxAsError, boolean isPreAggregatedStandardMetric) { checkArgument(metricData != null, "MetricData cannot be null."); MetricPointBuilder pointBuilder = new MetricPointBuilder(); MetricDataType type = metricData.getType(); double pointDataValue; switch (type) { case LONG_SUM: case LONG_GAUGE: pointDataValue = (double) ((LongPointData) pointData).getValue(); break; case DOUBLE_SUM: case DOUBLE_GAUGE: pointDataValue = ((DoublePointData) pointData).getValue(); break; case HISTOGRAM: long histogramCount = ((HistogramPointData) pointData).getCount(); if (histogramCount <= Integer.MAX_VALUE && histogramCount >= Integer.MIN_VALUE) { pointBuilder.setCount((int) histogramCount); } HistogramPointData histogramPointData = (HistogramPointData) pointData; double min = histogramPointData.getMin(); double max = histogramPointData.getMax(); if 
(shouldConvertToMilliseconds(metricData.getName(), isPreAggregatedStandardMetric)) { min = min * 1000; max = max * 1000; } pointDataValue = histogramPointData.getSum(); pointBuilder.setMin(min); pointBuilder.setMax(max); break; case SUMMARY: case EXPONENTIAL_HISTOGRAM: default: throw new IllegalArgumentException("metric data type '" + type + "' is not supported yet"); } if (shouldConvertToMilliseconds(metricData.getName(), isPreAggregatedStandardMetric)) { pointDataValue = pointDataValue * 1000; } pointBuilder.setValue(pointDataValue); String metricName = pointData.getAttributes().get(APPLICATIONINSIGHTS_INTERNAL_METRIC_NAME); if (metricName != null) { pointBuilder.setName(metricName); } else { pointBuilder.setName(metricData.getName()); } metricTelemetryBuilder.setMetricPoint(pointBuilder); Attributes attributes = pointData.getAttributes(); if (isPreAggregatedStandardMetric) { Long statusCode = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); boolean success = isSuccess(metricData.getName(), statusCode, captureHttpServer4xxAsError); Boolean isSynthetic = attributes.get(IS_SYNTHETIC); attributes.forEach( (key, value) -> applyConnectionStringAndRoleNameOverrides( metricTelemetryBuilder, value, key.getKey())); if (isServer(metricData.getName())) { RequestExtractor.extract(metricTelemetryBuilder, statusCode, success, isSynthetic); } else if (isClient(metricData.getName())) { String dependencyType; int defaultPort; if (metricData.getName().startsWith("http")) { dependencyType = "Http"; defaultPort = getDefaultPortForHttpScheme(getStableOrOldAttribute(attributes, SemanticAttributes.URL_SCHEME, SemanticAttributes.HTTP_SCHEME)); } else { dependencyType = attributes.get(SemanticAttributes.RPC_SYSTEM); if (dependencyType == null) { dependencyType = "Unknown"; } defaultPort = Integer.MAX_VALUE; } String target = SpanDataMapper.getTargetOrDefault(attributes, defaultPort, dependencyType); 
DependencyExtractor.extract( metricTelemetryBuilder, statusCode, success, dependencyType, target, isSynthetic); } } else { MappingsBuilder mappingsBuilder = new MappingsBuilder(METRIC); mappingsBuilder.build().map(attributes, metricTelemetryBuilder); } } private static boolean shouldConvertToMilliseconds(String metricName, boolean isPreAggregatedStandardMetric) { return isPreAggregatedStandardMetric && (metricName.equals("http.server.request.duration") || metricName.equals("http.client.request.duration")); } private static boolean applyConnectionStringAndRoleNameOverrides( AbstractTelemetryBuilder telemetryBuilder, Object value, String key) { if (key.equals(AiSemanticAttributes.INTERNAL_CONNECTION_STRING.getKey()) && value instanceof String) { telemetryBuilder.setConnectionString(ConnectionString.parse((String) value)); return true; } if (key.equals(AiSemanticAttributes.INTERNAL_ROLE_NAME.getKey()) && value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_CLOUD_ROLE.toString(), (String) value); return true; } return false; } private static int getDefaultPortForHttpScheme(@Nullable String httpScheme) { if (httpScheme == null) { return Integer.MAX_VALUE; } if (httpScheme.equals("https")) { return 443; } if (httpScheme.equals("http")) { return 80; } return Integer.MAX_VALUE; } private static boolean isSuccess(String metricName, Long statusCode, boolean captureHttpServer4xxAsError) { if (statusCode == null) { return true; } if (isClient(metricName)) { return statusCode < 400; } if (isServer(metricName)) { if (captureHttpServer4xxAsError) { return statusCode < 400; } return statusCode < 500; } return false; } private static boolean isServer(String metricName) { return metricName.contains(".server."); } }
Can it be run from cwd?
/**
 * Returns the current Azure CLI signed-in user.
 * <p>
 * Outside playback mode this shells out to {@code az ad signed-in-user show --output json}
 * (via {@code cmd.exe /c} on Windows, {@code /bin/sh -c} otherwise), parses the JSON output,
 * and returns an {@code AzureUser} carrying the {@code id} and {@code userPrincipalName}.
 * In playback mode it returns a namer-backed {@code AzureUser} without invoking the CLI.
 *
 * @return the Azure CLI signed-in user; a namer-only {@code AzureUser} in playback mode.
 * @throws RuntimeException if the Azure CLI is not installed or the user is not logged in.
 * @throws com.azure.core.exception.ClientAuthenticationException if the CLI invocation fails.
 */
protected AzureUser azureCliSignedInUser() { AzureUser azureCliUser = new AzureUser(testResourceNamer); if (!isPlaybackMode()) { String azCommand = "az ad signed-in-user show --output json"; final Pattern windowsProcessErrorMessage = Pattern.compile("'azd?' is not recognized"); final Pattern shProcessErrorMessage = Pattern.compile("azd?:.*not found");
// The two patterns above detect the shell's "command not found" output for a missing az/azd CLI.
try { String starter; String switcher; if (IdentityUtil.isWindowsPlatform()) { starter = "cmd.exe"; switcher = "/c"; } else { starter = "/bin/sh"; switcher = "-c"; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); builder.redirectInput(ProcessBuilder.Redirect.from(IdentityUtil.NULL_FILE));
// Run "az" from a known-safe directory (system32 on Windows, /bin/ elsewhere) rather than the
// inherited cwd, so a malicious "az" binary planted in the current directory cannot be executed.
String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (windowsProcessErrorMessage.matcher(line).find() || shProcessErrorMessage.matcher(line).find()) { throw LOGGER.logExceptionAsError(new RuntimeException("AzureCliCredential authentication unavailable. Azure CLI not installed."
+ "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString();
// NOTE(review): the boolean result of waitFor is ignored; if the CLI is still running after 10s,
// the following exitValue() call throws IllegalThreadStateException — consider handling the timeout.
process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { if (processOutput.contains("az login") || processOutput.contains("az account set")) { throw LOGGER.logExceptionAsError(new RuntimeException("AzureCliCredential authentication unavailable. Azure CLI not installed." + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException("get Azure CLI current signed-in user failed", null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Get Azure CLI signed-in user => A response was received from Azure CLI, deserializing the" + " response into an signed-in user.");
// Deserialize the CLI's JSON ({"id": ..., "userPrincipalName": ...}) into an AzureUser.
try (JsonReader reader = JsonProviders.createReader(processOutput)) { Map<String, Object> signedInUserInfo = reader.readMap(JsonReader::readUntyped); String userPrincipalName = (String) signedInUserInfo.get("userPrincipalName"); String id = (String) signedInUserInfo.get("id"); azureCliUser = new AzureUser(testResourceNamer, id, userPrincipalName); } } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(Exceptions.propagate(e)); } } return azureCliUser; }
}
protected AzureUser azureCliSignedInUser() { AzureUser azureCliUser = new AzureUser(testResourceNamer); if (!isPlaybackMode()) { String azCommand = "az ad signed-in-user show --output json"; final Pattern windowsProcessErrorMessage = Pattern.compile("'azd?' is not recognized"); final Pattern shProcessErrorMessage = Pattern.compile("azd?:.*not found"); try { String starter; String switcher; if (IdentityUtil.isWindowsPlatform()) { starter = "cmd.exe"; switcher = "/c"; } else { starter = "/bin/sh"; switcher = "-c"; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); builder.redirectInput(ProcessBuilder.Redirect.from(IdentityUtil.NULL_FILE)); builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (windowsProcessErrorMessage.matcher(line).find() || shProcessErrorMessage.matcher(line).find()) { throw LOGGER.logExceptionAsError(new RuntimeException("AzureCliCredential authentication unavailable. Azure CLI not installed." + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { if (processOutput.contains("az login") || processOutput.contains("az account set")) { throw LOGGER.logExceptionAsError(new RuntimeException("AzureCliCredential authentication unavailable. Azure CLI not installed." 
+ "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException("get Azure CLI current signed-in user failed", null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Get Azure CLI signed-in user => A response was received from Azure CLI, deserializing the" + " response into an signed-in user."); try (JsonReader reader = JsonProviders.createReader(processOutput)) { Map<String, Object> signedInUserInfo = reader.readMap(JsonReader::readUntyped); String userPrincipalName = (String) signedInUserInfo.get("userPrincipalName"); String id = (String) signedInUserInfo.get("id"); azureCliUser = new AzureUser(testResourceNamer, id, userPrincipalName); } } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(Exceptions.propagate(e)); } } return azureCliUser; }
/**
 * Base class for Azure Resource Manager test-proxy based tests. In playback mode it wires a mock
 * credential, a zeroed tenant/subscription profile, and the test proxy's playback client; in
 * record/live mode it builds a DefaultAzureCredential pipeline from AZURE_TENANT_ID and
 * AZURE_SUBSCRIPTION_ID. It also installs URL/header/body sanitizers for recordings and enforces
 * a per-test playback timeout via a JUnit 5 invocation interceptor.
 */
class ResourceManagerTestProxyTestBase extends TestProxyTestBase { private static final String ZERO_UUID = "00000000-0000-0000-0000-000000000000"; private static final String SUBSCRIPTION_ID_REGEX = "(?<=/subscriptions/)([^/?]+)"; private static final String ZERO_SUBSCRIPTION = ZERO_UUID; private static final String ZERO_TENANT = ZERO_UUID; private static final String PLAYBACK_URI_BASE = "https: private static final String AZURE_AUTH_LOCATION = "AZURE_AUTH_LOCATION"; private static final String AZURE_TEST_LOG_LEVEL = "AZURE_TEST_LOG_LEVEL"; private static final String HTTPS_PROXY_HOST = "https.proxyHost"; private static final String HTTPS_PROXY_PORT = "https.proxyPort"; private static final String HTTP_PROXY_HOST = "http.proxyHost"; private static final String HTTP_PROXY_PORT = "http.proxyPort"; private static final String USE_SYSTEM_PROXY = "java.net.useSystemProxies"; private static final String VALUE_TRUE = "true"; private static final String PLAYBACK_URI = PLAYBACK_URI_BASE + "1234"; private static final AzureProfile PLAYBACK_PROFILE = new AzureProfile( ZERO_TENANT, ZERO_SUBSCRIPTION, new AzureEnvironment(Arrays.stream(AzureEnvironment.Endpoint.values()) .collect(Collectors.toMap(AzureEnvironment.Endpoint::identifier, endpoint -> PLAYBACK_URI))) );
// Sink that discards all writes; used to silence System.out/System.err when log level is NONE (see beforeTest()).
private static final OutputStream EMPTY_OUTPUT_STREAM = new OutputStream() { @Override public void write(int b) { } @Override public void write(byte[] b) { } @Override public void write(byte[] b, int off, int len) { } }; /** * Redacted value. */ protected static final String REDACTED_VALUE = "REDACTED"; private static final ClientLogger LOGGER = new ClientLogger(ResourceManagerTestProxyTestBase.class); private AzureProfile testProfile; private boolean isSkipInPlayback; private final List<TestProxySanitizer> sanitizers = new ArrayList<>(); /** * Sets upper bound execution timeout for each @Test method. 
 * {@link org.junit.jupiter.api.Timeout} annotation on test methods will only narrow the timeout, not affecting the upper * bound. */
// NOTE(review): this 60-second upper bound is enforced in PLAYBACK runs only (see
// PlaybackTimeoutInterceptor below). Tests reported as "timing out after the latest test proxy
// update" abort here — TODO confirm whether 60s is still sufficient for playback through the
// current test proxy, or whether the proxy itself is the bottleneck.
@RegisterExtension final PlaybackTimeoutInterceptor playbackTimeoutInterceptor = new PlaybackTimeoutInterceptor(() -> Duration.ofSeconds(60)); /** * Initializes ResourceManagerTestProxyTestBase class. */ protected ResourceManagerTestProxyTestBase() { } /** * Generates a random resource name. * * @param prefix Prefix for the resource name. * @param maxLen Maximum length of the resource name. * @return A randomly generated resource name with a given prefix and maximum length. */ protected String generateRandomResourceName(String prefix, int maxLen) { return testResourceNamer.randomName(prefix, maxLen); } /** * Generates a random UUID. * @return A randomly generated UUID. */ protected String generateRandomUuid() { return testResourceNamer.randomUuid(); } /** * Generates a random password. * @return random password */ public static String password() { String password = new ResourceNamer("").randomName("Pa5$", 12); LOGGER.info("Password: {}", password); return password; } private static String sshPublicKey; /** * Generates an SSH public key. 
 * @return an SSH public key */ public static String sshPublicKey() { if (sshPublicKey == null) { try { KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA"); keyGen.initialize(1024); KeyPair pair = keyGen.generateKeyPair(); PublicKey publicKey = pair.getPublic(); RSAPublicKey rsaPublicKey = (RSAPublicKey) publicKey; ByteArrayOutputStream byteOs = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(byteOs); dos.writeInt("ssh-rsa".getBytes(StandardCharsets.US_ASCII).length); dos.write("ssh-rsa".getBytes(StandardCharsets.US_ASCII)); dos.writeInt(rsaPublicKey.getPublicExponent().toByteArray().length); dos.write(rsaPublicKey.getPublicExponent().toByteArray()); dos.writeInt(rsaPublicKey.getModulus().toByteArray().length); dos.write(rsaPublicKey.getModulus().toByteArray()); String publicKeyEncoded = new String(Base64.getEncoder().encode(byteOs.toByteArray()), StandardCharsets.US_ASCII); sshPublicKey = "ssh-rsa " + publicKeyEncoded; } catch (NoSuchAlgorithmException | IOException e) { throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e)); } } return sshPublicKey; } private static final Pattern SUBSCRIPTION_ID_PATTERN = Pattern.compile(SUBSCRIPTION_ID_REGEX); /** * Asserts that the resource ID is same. * * @param expected the expected resource ID. * @param actual the actual resource ID. */ protected void assertResourceIdEquals(String expected, String actual) { String sanitizedExpected = SUBSCRIPTION_ID_PATTERN.matcher(expected).replaceAll(ZERO_UUID); String sanitizedActual = SUBSCRIPTION_ID_PATTERN.matcher(actual).replaceAll(ZERO_UUID); Assertions.assertTrue(sanitizedExpected.equalsIgnoreCase(sanitizedActual), String.format("expected: %s but was: %s", expected, actual)); } /** * Loads a client ID from file. * * @return A client ID loaded from a file. 
 */ protected String clientIdFromFile() { String clientId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_CLIENT_ID); return testResourceNamer.recordValueFromConfig(clientId); } /** * Return current Azure CLI signed-in user's userPrincipalName. * * @return current Azure CLI signed-in user. */ private static String getSafeWorkingDirectory() { if (IdentityUtil.isWindowsPlatform()) { String windowsSystemRoot = System.getenv("SystemRoot"); return CoreUtils.isNullOrEmpty(windowsSystemRoot) ? null : windowsSystemRoot + "\\system32"; } else { return "/bin/"; } } /** * Gets the test profile. * @return The test profile. */ protected AzureProfile profile() { return testProfile; } /** * Checks whether test mode is {@link TestMode * @return Whether the test mode is {@link TestMode */ protected boolean isPlaybackMode() { return getTestMode() == TestMode.PLAYBACK; } /** * Checks whether test should be skipped in playback. * @return Whether the test should be skipped in playback. */ protected boolean skipInPlayback() { if (isPlaybackMode()) { isSkipInPlayback = true; } return isSkipInPlayback; } @Override protected void beforeTest() { TokenCredential credential; HttpPipeline httpPipeline; String logLevel = Configuration.getGlobalConfiguration().get(AZURE_TEST_LOG_LEVEL); HttpLogDetailLevel httpLogDetailLevel; try { httpLogDetailLevel = HttpLogDetailLevel.valueOf(logLevel); } catch (Exception e) { if (isPlaybackMode()) { httpLogDetailLevel = HttpLogDetailLevel.NONE; LOGGER.error("Environment variable '{}' has not been set yet. Using 'NONE' for PLAYBACK.", AZURE_TEST_LOG_LEVEL); } else { httpLogDetailLevel = HttpLogDetailLevel.BODY_AND_HEADERS; LOGGER.error("Environment variable '{}' has not been set yet. 
Using 'BODY_AND_HEADERS' for RECORD/LIVE.", AZURE_TEST_LOG_LEVEL); } } if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { try { System.setOut(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name())); System.setErr(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name())); } catch (UnsupportedEncodingException e) { } }
// Playback: zeroed tenant/subscription profile, a mock credential, and the test proxy's playback
// client; request matching ignores the api-version query parameter and the If-Match header.
if (isPlaybackMode()) { testProfile = PLAYBACK_PROFILE; List<HttpPipelinePolicy> policies = new ArrayList<>(); httpPipeline = buildHttpPipeline( new MockTokenCredential(), testProfile, new HttpLogOptions().setLogLevel(httpLogDetailLevel), policies, interceptorManager.getPlaybackClient()); if (!testContextManager.doNotRecordTest()) { interceptorManager.addMatchers(Collections.singletonList(new CustomMatcher().setIgnoredQueryParameters(Arrays.asList("api-version")).setExcludedHeaders(Arrays.asList("If-Match")))); addSanitizers(); removeSanitizers(); } } else { Configuration configuration = Configuration.getGlobalConfiguration(); String tenantId = Objects.requireNonNull( configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID), "'AZURE_TENANT_ID' environment variable cannot be null."); String subscriptionId = Objects.requireNonNull( configuration.get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID), "'AZURE_SUBSCRIPTION_ID' environment variable cannot be null."); credential = new DefaultAzureCredentialBuilder() .authorityHost(AzureEnvironment.AZURE.getActiveDirectoryEndpoint()) .build(); testProfile = new AzureProfile(tenantId, subscriptionId, AzureEnvironment.AZURE); List<HttpPipelinePolicy> policies = new ArrayList<>(); if (interceptorManager.isRecordMode() && !testContextManager.doNotRecordTest()) { policies.add(this.interceptorManager.getRecordPolicy()); addSanitizers(); removeSanitizers(); } if (httpLogDetailLevel == HttpLogDetailLevel.BODY_AND_HEADERS) { policies.add(new HttpDebugLoggingPolicy()); httpLogDetailLevel = HttpLogDetailLevel.NONE; } httpPipeline = buildHttpPipeline( credential, testProfile, new 
HttpLogOptions().setLogLevel(httpLogDetailLevel), policies, generateHttpClientWithProxy(null, null)); } initializeClients(httpPipeline, testProfile); } /** * Generates an {@link HttpClient} with a proxy. * * @param clientBuilder The HttpClient builder. * @param proxyOptions The proxy. * @return An HttpClient with a proxy. */ protected HttpClient generateHttpClientWithProxy(NettyAsyncHttpClientBuilder clientBuilder, ProxyOptions proxyOptions) { if (clientBuilder == null) { clientBuilder = new NettyAsyncHttpClientBuilder(); } if (proxyOptions != null) { clientBuilder.proxy(proxyOptions); } else { try { System.setProperty(USE_SYSTEM_PROXY, VALUE_TRUE); List<Proxy> proxies = ProxySelector.getDefault().select(new URI(AzureEnvironment.AZURE.getResourceManagerEndpoint())); if (!proxies.isEmpty()) { for (Proxy proxy : proxies) { if (proxy.address() instanceof InetSocketAddress) { String host = ((InetSocketAddress) proxy.address()).getHostName(); int port = ((InetSocketAddress) proxy.address()).getPort(); switch (proxy.type()) { case HTTP: return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))).build(); case SOCKS: return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.SOCKS5, new InetSocketAddress(host, port))).build(); default: } } } } String host = null; int port = 0; if (System.getProperty(HTTPS_PROXY_HOST) != null && System.getProperty(HTTPS_PROXY_PORT) != null) { host = System.getProperty(HTTPS_PROXY_HOST); port = Integer.parseInt(System.getProperty(HTTPS_PROXY_PORT)); } else if (System.getProperty(HTTP_PROXY_HOST) != null && System.getProperty(HTTP_PROXY_PORT) != null) { host = System.getProperty(HTTP_PROXY_HOST); port = Integer.parseInt(System.getProperty(HTTP_PROXY_PORT)); } if (host != null) { clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))); } } catch (URISyntaxException ignored) { } } return clientBuilder.build(); } @Override protected void afterTest() { if 
(!isSkipInPlayback) { cleanUpResources(); } } /** * Sets sdk context when running the tests * * @param internalContext the internal runtime context * @param objects the manager classes to change internal context * @param <T> the type of internal context * @throws RuntimeException when field cannot be found or set. */ protected <T> void setInternalContext(T internalContext, Object... objects) { try { for (Object obj : objects) { for (final Field field : obj.getClass().getSuperclass().getDeclaredFields()) { if (field.getName().equals("resourceManager")) { setAccessible(field); Field context = field.get(obj).getClass().getDeclaredField("internalContext"); setAccessible(context); context.set(field.get(obj), internalContext); } } for (Field field : obj.getClass().getDeclaredFields()) { if (field.getName().equals("internalContext")) { setAccessible(field); field.set(obj, internalContext); } else if (field.getName().contains("Manager")) { setAccessible(field); setInternalContext(internalContext, field.get(obj)); } } } } catch (IllegalAccessException | NoSuchFieldException ex) { throw LOGGER.logExceptionAsError(new RuntimeException(ex)); } } private void setAccessible(final AccessibleObject accessibleObject) { Runnable runnable = () -> accessibleObject.setAccessible(true); runnable.run(); } /** * Builds the manager with provided http pipeline and profile in general manner. * * @param manager the class of the manager * @param httpPipeline the http pipeline * @param profile the azure profile * @param <T> the type of the manager * @return the manager instance * @throws RuntimeException when field cannot be found or set. 
 */ protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) { try { Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass()); setAccessible(constructor); return constructor.newInstance(httpPipeline, profile); } catch (ReflectiveOperationException ex) { throw LOGGER.logExceptionAsError(new RuntimeException(ex)); } } /** * Builds an HttpPipeline. * * @param credential The credentials to use in the pipeline. * @param profile The AzureProfile to use in the pipeline. * @param httpLogOptions The HTTP logging options to use in the pipeline. * @param policies Additional policies to use in the pipeline. * @param httpClient The HttpClient to use in the pipeline. * @return A new constructed HttpPipeline. */ protected abstract HttpPipeline buildHttpPipeline( TokenCredential credential, AzureProfile profile, HttpLogOptions httpLogOptions, List<HttpPipelinePolicy> policies, HttpClient httpClient); /** * Initializes service clients used in testing. * * @param httpPipeline The HttpPipeline to use in the clients. * @param profile The AzureProfile to use in the clients. */ protected abstract void initializeClients(HttpPipeline httpPipeline, AzureProfile profile); /** * Cleans up resources. 
 */ protected abstract void cleanUpResources(); private void addSanitizers() { List<TestProxySanitizer> sanitizers = new ArrayList<>(Arrays.asList( new TestProxySanitizer(SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.URL), new TestProxySanitizer("(?<=%2Fsubscriptions%2F)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL), new TestProxySanitizer("Retry-After", null, "0", TestProxySanitizerType.HEADER), new TestProxySanitizer("$..id", SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secretText", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..keys[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..adminPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..Password", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..accessSAS", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$.properties.osProfile.customData", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..administratorLoginPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..hubDatabasePassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..aliasPrimaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..aliasSecondaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..primaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secondaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..primaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secondaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new 
TestProxySanitizer("$..primaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secondaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..passwords[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secret", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$.properties.siteConfig.machineKey.decryptionKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("(?:AccountKey=)(?<accountKey>.*?)(?:;)", REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("accountKey"), new TestProxySanitizer("$.properties.WEBSITE_AUTH_ENCRYPTION_KEY", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$.properties.DOCKER_REGISTRY_SERVER_PASSWORD", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY) )); sanitizers.addAll(this.sanitizers); interceptorManager.addSanitizers(sanitizers); } private void removeSanitizers() { interceptorManager.removeSanitizers("AZSDK2003", "AZSDK2030", "AZSDK3430", "AZSDK3493"); } /** * Adds test proxy sanitizers. * <p> * Recommend to call this API in subclass constructor. * * @param sanitizers the test proxy sanitizers. */ protected void addSanitizers(TestProxySanitizer... 
sanitizers) { this.sanitizers.addAll(Arrays.asList(sanitizers)); } private final class PlaybackTimeoutInterceptor implements InvocationInterceptor { private final Duration duration; private PlaybackTimeoutInterceptor(Supplier<Duration> timeoutSupplier) { Objects.requireNonNull(timeoutSupplier); this.duration = timeoutSupplier.get(); } @Override public void interceptTestMethod(Invocation<Void> invocation, ReflectiveInvocationContext<Method> invocationContext, ExtensionContext extensionContext) throws Throwable {
// Preemptive timeout (per JUnit 5 docs, the invocation runs in a separate thread and is aborted on
// expiry). Enforced only in playback — live/record tests are not bounded by this extension.
if (isPlaybackMode()) { Assertions.assertTimeoutPreemptively(duration, invocation::proceed); } else { invocation.proceed(); } } } }
class ResourceManagerTestProxyTestBase extends TestProxyTestBase { private static final String ZERO_UUID = "00000000-0000-0000-0000-000000000000"; private static final String SUBSCRIPTION_ID_REGEX = "(?<=/subscriptions/)([^/?]+)"; private static final String ZERO_SUBSCRIPTION = ZERO_UUID; private static final String ZERO_TENANT = ZERO_UUID; private static final String PLAYBACK_URI_BASE = "https: private static final String AZURE_AUTH_LOCATION = "AZURE_AUTH_LOCATION"; private static final String AZURE_TEST_LOG_LEVEL = "AZURE_TEST_LOG_LEVEL"; private static final String HTTPS_PROXY_HOST = "https.proxyHost"; private static final String HTTPS_PROXY_PORT = "https.proxyPort"; private static final String HTTP_PROXY_HOST = "http.proxyHost"; private static final String HTTP_PROXY_PORT = "http.proxyPort"; private static final String USE_SYSTEM_PROXY = "java.net.useSystemProxies"; private static final String VALUE_TRUE = "true"; private static final String PLAYBACK_URI = PLAYBACK_URI_BASE + "1234"; private static final AzureProfile PLAYBACK_PROFILE = new AzureProfile( ZERO_TENANT, ZERO_SUBSCRIPTION, new AzureEnvironment(Arrays.stream(AzureEnvironment.Endpoint.values()) .collect(Collectors.toMap(AzureEnvironment.Endpoint::identifier, endpoint -> PLAYBACK_URI))) ); private static final OutputStream EMPTY_OUTPUT_STREAM = new OutputStream() { @Override public void write(int b) { } @Override public void write(byte[] b) { } @Override public void write(byte[] b, int off, int len) { } }; /** * Redacted value. */ protected static final String REDACTED_VALUE = "REDACTED"; private static final ClientLogger LOGGER = new ClientLogger(ResourceManagerTestProxyTestBase.class); private AzureProfile testProfile; private boolean isSkipInPlayback; private final List<TestProxySanitizer> sanitizers = new ArrayList<>(); /** * Sets upper bound execution timeout for each @Test method. 
* {@link org.junit.jupiter.api.Timeout} annotation on test methods will only narrow the timeout, not affecting the upper * bound. */ @RegisterExtension final PlaybackTimeoutInterceptor playbackTimeoutInterceptor = new PlaybackTimeoutInterceptor(() -> Duration.ofSeconds(60)); /** * Initializes ResourceManagerTestProxyTestBase class. */ protected ResourceManagerTestProxyTestBase() { } /** * Generates a random resource name. * * @param prefix Prefix for the resource name. * @param maxLen Maximum length of the resource name. * @return A randomly generated resource name with a given prefix and maximum length. */ protected String generateRandomResourceName(String prefix, int maxLen) { return testResourceNamer.randomName(prefix, maxLen); } /** * Generates a random UUID. * @return A randomly generated UUID. */ protected String generateRandomUuid() { return testResourceNamer.randomUuid(); } /** * Generates a random password. * @return random password */ public static String password() { String password = new ResourceNamer("").randomName("Pa5$", 12); LOGGER.info("Password: {}", password); return password; } private static String sshPublicKey; /** * Generates an SSH public key. 
* @return an SSH public key */ public static String sshPublicKey() { if (sshPublicKey == null) { try { KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA"); keyGen.initialize(1024); KeyPair pair = keyGen.generateKeyPair(); PublicKey publicKey = pair.getPublic(); RSAPublicKey rsaPublicKey = (RSAPublicKey) publicKey; ByteArrayOutputStream byteOs = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(byteOs); dos.writeInt("ssh-rsa".getBytes(StandardCharsets.US_ASCII).length); dos.write("ssh-rsa".getBytes(StandardCharsets.US_ASCII)); dos.writeInt(rsaPublicKey.getPublicExponent().toByteArray().length); dos.write(rsaPublicKey.getPublicExponent().toByteArray()); dos.writeInt(rsaPublicKey.getModulus().toByteArray().length); dos.write(rsaPublicKey.getModulus().toByteArray()); String publicKeyEncoded = new String(Base64.getEncoder().encode(byteOs.toByteArray()), StandardCharsets.US_ASCII); sshPublicKey = "ssh-rsa " + publicKeyEncoded; } catch (NoSuchAlgorithmException | IOException e) { throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e)); } } return sshPublicKey; } private static final Pattern SUBSCRIPTION_ID_PATTERN = Pattern.compile(SUBSCRIPTION_ID_REGEX); /** * Asserts that the resource ID is same. * * @param expected the expected resource ID. * @param actual the actual resource ID. */ protected void assertResourceIdEquals(String expected, String actual) { String sanitizedExpected = SUBSCRIPTION_ID_PATTERN.matcher(expected).replaceAll(ZERO_UUID); String sanitizedActual = SUBSCRIPTION_ID_PATTERN.matcher(actual).replaceAll(ZERO_UUID); Assertions.assertTrue(sanitizedExpected.equalsIgnoreCase(sanitizedActual), String.format("expected: %s but was: %s", expected, actual)); } /** * Loads a client ID from file. * * @return A client ID loaded from a file. 
*/ protected String clientIdFromFile() { String clientId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_CLIENT_ID); return testResourceNamer.recordValueFromConfig(clientId); } /** * Return current Azure CLI signed-in user's userPrincipalName. * * @return current Azure CLI signed-in user. */ private static String getSafeWorkingDirectory() { if (IdentityUtil.isWindowsPlatform()) { String windowsSystemRoot = System.getenv("SystemRoot"); return CoreUtils.isNullOrEmpty(windowsSystemRoot) ? null : windowsSystemRoot + "\\system32"; } else { return "/bin/"; } } /** * Gets the test profile. * @return The test profile. */ protected AzureProfile profile() { return testProfile; } /** * Checks whether test mode is {@link TestMode * @return Whether the test mode is {@link TestMode */ protected boolean isPlaybackMode() { return getTestMode() == TestMode.PLAYBACK; } /** * Checks whether test should be skipped in playback. * @return Whether the test should be skipped in playback. */ protected boolean skipInPlayback() { if (isPlaybackMode()) { isSkipInPlayback = true; } return isSkipInPlayback; } @Override protected void beforeTest() { TokenCredential credential; HttpPipeline httpPipeline; String logLevel = Configuration.getGlobalConfiguration().get(AZURE_TEST_LOG_LEVEL); HttpLogDetailLevel httpLogDetailLevel; try { httpLogDetailLevel = HttpLogDetailLevel.valueOf(logLevel); } catch (Exception e) { if (isPlaybackMode()) { httpLogDetailLevel = HttpLogDetailLevel.NONE; LOGGER.error("Environment variable '{}' has not been set yet. Using 'NONE' for PLAYBACK.", AZURE_TEST_LOG_LEVEL); } else { httpLogDetailLevel = HttpLogDetailLevel.BODY_AND_HEADERS; LOGGER.error("Environment variable '{}' has not been set yet. 
Using 'BODY_AND_HEADERS' for RECORD/LIVE.", AZURE_TEST_LOG_LEVEL); } } if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { try { System.setOut(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name())); System.setErr(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name())); } catch (UnsupportedEncodingException e) { } } if (isPlaybackMode()) { testProfile = PLAYBACK_PROFILE; List<HttpPipelinePolicy> policies = new ArrayList<>(); httpPipeline = buildHttpPipeline( new MockTokenCredential(), testProfile, new HttpLogOptions().setLogLevel(httpLogDetailLevel), policies, interceptorManager.getPlaybackClient()); if (!testContextManager.doNotRecordTest()) { interceptorManager.addMatchers(Collections.singletonList(new CustomMatcher().setIgnoredQueryParameters(Arrays.asList("api-version")).setExcludedHeaders(Arrays.asList("If-Match")))); addSanitizers(); removeSanitizers(); } } else { Configuration configuration = Configuration.getGlobalConfiguration(); String tenantId = Objects.requireNonNull( configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID), "'AZURE_TENANT_ID' environment variable cannot be null."); String subscriptionId = Objects.requireNonNull( configuration.get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID), "'AZURE_SUBSCRIPTION_ID' environment variable cannot be null."); credential = new DefaultAzureCredentialBuilder() .authorityHost(AzureEnvironment.AZURE.getActiveDirectoryEndpoint()) .build(); testProfile = new AzureProfile(tenantId, subscriptionId, AzureEnvironment.AZURE); List<HttpPipelinePolicy> policies = new ArrayList<>(); if (interceptorManager.isRecordMode() && !testContextManager.doNotRecordTest()) { policies.add(this.interceptorManager.getRecordPolicy()); addSanitizers(); removeSanitizers(); } if (httpLogDetailLevel == HttpLogDetailLevel.BODY_AND_HEADERS) { policies.add(new HttpDebugLoggingPolicy()); httpLogDetailLevel = HttpLogDetailLevel.NONE; } httpPipeline = buildHttpPipeline( credential, testProfile, new 
HttpLogOptions().setLogLevel(httpLogDetailLevel), policies, generateHttpClientWithProxy(null, null)); } initializeClients(httpPipeline, testProfile); } /** * Generates an {@link HttpClient} with a proxy. * * @param clientBuilder The HttpClient builder. * @param proxyOptions The proxy. * @return An HttpClient with a proxy. */ protected HttpClient generateHttpClientWithProxy(NettyAsyncHttpClientBuilder clientBuilder, ProxyOptions proxyOptions) { if (clientBuilder == null) { clientBuilder = new NettyAsyncHttpClientBuilder(); } if (proxyOptions != null) { clientBuilder.proxy(proxyOptions); } else { try { System.setProperty(USE_SYSTEM_PROXY, VALUE_TRUE); List<Proxy> proxies = ProxySelector.getDefault().select(new URI(AzureEnvironment.AZURE.getResourceManagerEndpoint())); if (!proxies.isEmpty()) { for (Proxy proxy : proxies) { if (proxy.address() instanceof InetSocketAddress) { String host = ((InetSocketAddress) proxy.address()).getHostName(); int port = ((InetSocketAddress) proxy.address()).getPort(); switch (proxy.type()) { case HTTP: return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))).build(); case SOCKS: return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.SOCKS5, new InetSocketAddress(host, port))).build(); default: } } } } String host = null; int port = 0; if (System.getProperty(HTTPS_PROXY_HOST) != null && System.getProperty(HTTPS_PROXY_PORT) != null) { host = System.getProperty(HTTPS_PROXY_HOST); port = Integer.parseInt(System.getProperty(HTTPS_PROXY_PORT)); } else if (System.getProperty(HTTP_PROXY_HOST) != null && System.getProperty(HTTP_PROXY_PORT) != null) { host = System.getProperty(HTTP_PROXY_HOST); port = Integer.parseInt(System.getProperty(HTTP_PROXY_PORT)); } if (host != null) { clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))); } } catch (URISyntaxException ignored) { } } return clientBuilder.build(); } @Override protected void afterTest() { if 
(!isSkipInPlayback) { cleanUpResources(); } } /** * Sets sdk context when running the tests * * @param internalContext the internal runtime context * @param objects the manager classes to change internal context * @param <T> the type of internal context * @throws RuntimeException when field cannot be found or set. */ protected <T> void setInternalContext(T internalContext, Object... objects) { try { for (Object obj : objects) { for (final Field field : obj.getClass().getSuperclass().getDeclaredFields()) { if (field.getName().equals("resourceManager")) { setAccessible(field); Field context = field.get(obj).getClass().getDeclaredField("internalContext"); setAccessible(context); context.set(field.get(obj), internalContext); } } for (Field field : obj.getClass().getDeclaredFields()) { if (field.getName().equals("internalContext")) { setAccessible(field); field.set(obj, internalContext); } else if (field.getName().contains("Manager")) { setAccessible(field); setInternalContext(internalContext, field.get(obj)); } } } } catch (IllegalAccessException | NoSuchFieldException ex) { throw LOGGER.logExceptionAsError(new RuntimeException(ex)); } } private void setAccessible(final AccessibleObject accessibleObject) { Runnable runnable = () -> accessibleObject.setAccessible(true); runnable.run(); } /** * Builds the manager with provided http pipeline and profile in general manner. * * @param manager the class of the manager * @param httpPipeline the http pipeline * @param profile the azure profile * @param <T> the type of the manager * @return the manager instance * @throws RuntimeException when field cannot be found or set. 
*/ protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) { try { Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass()); setAccessible(constructor); return constructor.newInstance(httpPipeline, profile); } catch (ReflectiveOperationException ex) { throw LOGGER.logExceptionAsError(new RuntimeException(ex)); } } /** * Builds an HttpPipeline. * * @param credential The credentials to use in the pipeline. * @param profile The AzureProfile to use in the pipeline. * @param httpLogOptions The HTTP logging options to use in the pipeline. * @param policies Additional policies to use in the pipeline. * @param httpClient The HttpClient to use in the pipeline. * @return A new constructed HttpPipeline. */ protected abstract HttpPipeline buildHttpPipeline( TokenCredential credential, AzureProfile profile, HttpLogOptions httpLogOptions, List<HttpPipelinePolicy> policies, HttpClient httpClient); /** * Initializes service clients used in testing. * * @param httpPipeline The HttpPipeline to use in the clients. * @param profile The AzureProfile to use in the clients. */ protected abstract void initializeClients(HttpPipeline httpPipeline, AzureProfile profile); /** * Cleans up resources. 
*/ protected abstract void cleanUpResources(); private void addSanitizers() { List<TestProxySanitizer> sanitizers = new ArrayList<>(Arrays.asList( new TestProxySanitizer(SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.URL), new TestProxySanitizer("(?<=%2Fsubscriptions%2F)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL), new TestProxySanitizer("Retry-After", null, "0", TestProxySanitizerType.HEADER), new TestProxySanitizer("$..id", SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secretText", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..keys[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..adminPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..Password", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..accessSAS", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$.properties.osProfile.customData", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..administratorLoginPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..hubDatabasePassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..aliasPrimaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..aliasSecondaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..primaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secondaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..primaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secondaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new 
TestProxySanitizer("$..primaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secondaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..passwords[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secret", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$.properties.siteConfig.machineKey.decryptionKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("(?:AccountKey=)(?<accountKey>.*?)(?:;)", REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("accountKey"), new TestProxySanitizer("$.properties.WEBSITE_AUTH_ENCRYPTION_KEY", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$.properties.DOCKER_REGISTRY_SERVER_PASSWORD", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY) )); sanitizers.addAll(this.sanitizers); interceptorManager.addSanitizers(sanitizers); } private void removeSanitizers() { interceptorManager.removeSanitizers("AZSDK2003", "AZSDK2030", "AZSDK3430", "AZSDK3493"); } /** * Adds test proxy sanitizers. * <p> * Recommend to call this API in subclass constructor. * * @param sanitizers the test proxy sanitizers. */ protected void addSanitizers(TestProxySanitizer... 
sanitizers) { this.sanitizers.addAll(Arrays.asList(sanitizers)); } private final class PlaybackTimeoutInterceptor implements InvocationInterceptor { private final Duration duration; private PlaybackTimeoutInterceptor(Supplier<Duration> timeoutSupplier) { Objects.requireNonNull(timeoutSupplier); this.duration = timeoutSupplier.get(); } @Override public void interceptTestMethod(Invocation<Void> invocation, ReflectiveInvocationContext<Method> invocationContext, ExtensionContext extensionContext) throws Throwable { if (isPlaybackMode()) { Assertions.assertTimeoutPreemptively(duration, invocation::proceed); } else { invocation.proceed(); } } } }
Yes, it can. Updated.
// Returns the current Azure CLI signed-in user (object id + userPrincipalName) by shelling out to
// `az ad signed-in-user show --output json` via cmd.exe /c (Windows) or /bin/sh -c (POSIX).
// In PLAYBACK mode no process is launched and a namer-backed placeholder AzureUser is returned.
// The process runs from getSafeWorkingDirectory() (throws IllegalStateException if none is found),
// stdin is redirected from the null file, stderr merged into stdout, and the output is scanned for
// "'az' is not recognized" / "az: not found" to detect a missing CLI before JSON deserialization.
// NOTE(review): process.waitFor(10, TimeUnit.SECONDS) ignores its boolean result; if the process
// is still alive after 10s, the following exitValue() throws IllegalThreadStateException — consider
// checking the returned flag and destroying the process.
// NOTE(review): several string literals below are truncated at "https:" (the troubleshooting-URL
// tails appear to have been stripped as if they were "//" comments) — restore the full links;
// as written these literals are unterminated and the method does not compile.
// NOTE(review): a near-identical copy of this method (without the working-directory handling)
// appears later in this file — the duplicates should be collapsed into one definition.
protected AzureUser azureCliSignedInUser() { AzureUser azureCliUser = new AzureUser(testResourceNamer); if (!isPlaybackMode()) { String azCommand = "az ad signed-in-user show --output json"; final Pattern windowsProcessErrorMessage = Pattern.compile("'azd?' is not recognized"); final Pattern shProcessErrorMessage = Pattern.compile("azd?:.*not found"); try { String starter; String switcher; if (IdentityUtil.isWindowsPlatform()) { starter = "cmd.exe"; switcher = "/c"; } else { starter = "/bin/sh"; switcher = "-c"; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); builder.redirectInput(ProcessBuilder.Redirect.from(IdentityUtil.NULL_FILE)); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (windowsProcessErrorMessage.matcher(line).find() || shProcessErrorMessage.matcher(line).find()) { throw LOGGER.logExceptionAsError(new RuntimeException("AzureCliCredential authentication unavailable. Azure CLI not installed."
+ "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { if (processOutput.contains("az login") || processOutput.contains("az account set")) { throw LOGGER.logExceptionAsError(new RuntimeException("AzureCliCredential authentication unavailable. Azure CLI not installed." + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException("get Azure CLI current signed-in user failed", null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Get Azure CLI signed-in user => A response was received from Azure CLI, deserializing the" + " response into an signed-in user."); try (JsonReader reader = JsonProviders.createReader(processOutput)) { Map<String, Object> signedInUserInfo = reader.readMap(JsonReader::readUntyped); String userPrincipalName = (String) signedInUserInfo.get("userPrincipalName"); String id = (String) signedInUserInfo.get("id"); azureCliUser = new AzureUser(testResourceNamer, id, userPrincipalName); } } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(Exceptions.propagate(e)); } } return azureCliUser; }
}
// Duplicate (older) variant of azureCliSignedInUser: fetches the Azure CLI signed-in user via
// `az ad signed-in-user show --output json` (cmd.exe /c on Windows, /bin/sh -c elsewhere) and
// deserializes the JSON into an AzureUser; in PLAYBACK mode it returns a namer-backed placeholder
// without spawning a process.
// NOTE(review): unlike the other copy of this method in this file, this variant does not set a
// safe working directory on the ProcessBuilder — the two copies should be de-duplicated, keeping
// the working-directory handling.
// NOTE(review): process.waitFor(10, TimeUnit.SECONDS) ignores its boolean result; if the process
// has not exited, exitValue() throws IllegalThreadStateException.
// NOTE(review): string literals below are truncated at "https:" (URL tails lost, apparently
// stripped as "//" comments) — restore the full troubleshooting links; as written the literals
// are unterminated.
protected AzureUser azureCliSignedInUser() { AzureUser azureCliUser = new AzureUser(testResourceNamer); if (!isPlaybackMode()) { String azCommand = "az ad signed-in-user show --output json"; final Pattern windowsProcessErrorMessage = Pattern.compile("'azd?' is not recognized"); final Pattern shProcessErrorMessage = Pattern.compile("azd?:.*not found"); try { String starter; String switcher; if (IdentityUtil.isWindowsPlatform()) { starter = "cmd.exe"; switcher = "/c"; } else { starter = "/bin/sh"; switcher = "-c"; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); builder.redirectInput(ProcessBuilder.Redirect.from(IdentityUtil.NULL_FILE)); builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (windowsProcessErrorMessage.matcher(line).find() || shProcessErrorMessage.matcher(line).find()) { throw LOGGER.logExceptionAsError(new RuntimeException("AzureCliCredential authentication unavailable. Azure CLI not installed." + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { if (processOutput.contains("az login") || processOutput.contains("az account set")) { throw LOGGER.logExceptionAsError(new RuntimeException("AzureCliCredential authentication unavailable. Azure CLI not installed."
+ "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException("get Azure CLI current signed-in user failed", null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Get Azure CLI signed-in user => A response was received from Azure CLI, deserializing the" + " response into an signed-in user."); try (JsonReader reader = JsonProviders.createReader(processOutput)) { Map<String, Object> signedInUserInfo = reader.readMap(JsonReader::readUntyped); String userPrincipalName = (String) signedInUserInfo.get("userPrincipalName"); String id = (String) signedInUserInfo.get("id"); azureCliUser = new AzureUser(testResourceNamer, id, userPrincipalName); } } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(Exceptions.propagate(e)); } } return azureCliUser; }
// Base class for Azure Resource Manager tests running against the test proxy (extends
// TestProxyTestBase). What the code below shows:
//  - PLAYBACK: beforeTest() uses a zeroed AzureProfile (tenant/subscription = all-zero UUID,
//    every AzureEnvironment endpoint mapped to the local playback URI), a MockTokenCredential,
//    and the interceptor manager's playback HttpClient; registers a CustomMatcher that ignores
//    the "api-version" query parameter and excludes the "If-Match" header.
//  - RECORD/LIVE: reads AZURE_TENANT_ID / AZURE_SUBSCRIPTION_ID from global configuration,
//    authenticates via DefaultAzureCredential, adds the record policy, and builds the client
//    with generateHttpClientWithProxy() (system/JVM proxy autodetection).
//  - addSanitizers() redacts subscription ids, passwords, keys, SAS and connection strings from
//    recordings; removeSanitizers() opts out of defaults AZSDK2003/2030/3430/3493.
//  - PlaybackTimeoutInterceptor aborts each @Test preemptively after 60 seconds
//    (Assertions.assertTimeoutPreemptively) but ONLY in PLAYBACK mode; record/live test
//    methods are not time-bounded here. A @Timeout annotation can only narrow this bound.
//  - When the effective HTTP log level is NONE, System.out/System.err are swallowed into an
//    empty OutputStream for the remainder of the run.
// NOTE(review): many string literals below are truncated at "https:" (e.g. PLAYBACK_URI_BASE) —
// the URL tails appear to have been stripped as if "//..." were comments; restore them, as the
// literals are unterminated as written.
// NOTE(review): the UnsupportedEncodingException catch in beforeTest() is silently swallowed —
// if intentional, comment why; otherwise log it.
// NOTE(review): this class definition is duplicated elsewhere in this file chunk — collapse the
// copies into a single definition.
class ResourceManagerTestProxyTestBase extends TestProxyTestBase { private static final String ZERO_UUID = "00000000-0000-0000-0000-000000000000"; private static final String SUBSCRIPTION_ID_REGEX = "(?<=/subscriptions/)([^/?]+)"; private static final String ZERO_SUBSCRIPTION = ZERO_UUID; private static final String ZERO_TENANT = ZERO_UUID; private static final String PLAYBACK_URI_BASE = "https: private static final String AZURE_AUTH_LOCATION = "AZURE_AUTH_LOCATION"; private static final String AZURE_TEST_LOG_LEVEL = "AZURE_TEST_LOG_LEVEL"; private static final String HTTPS_PROXY_HOST = "https.proxyHost"; private static final String HTTPS_PROXY_PORT = "https.proxyPort"; private static final String HTTP_PROXY_HOST = "http.proxyHost"; private static final String HTTP_PROXY_PORT = "http.proxyPort"; private static final String USE_SYSTEM_PROXY = "java.net.useSystemProxies"; private static final String VALUE_TRUE = "true"; private static final String PLAYBACK_URI = PLAYBACK_URI_BASE + "1234"; private static final AzureProfile PLAYBACK_PROFILE = new AzureProfile( ZERO_TENANT, ZERO_SUBSCRIPTION, new AzureEnvironment(Arrays.stream(AzureEnvironment.Endpoint.values()) .collect(Collectors.toMap(AzureEnvironment.Endpoint::identifier, endpoint -> PLAYBACK_URI))) ); private static final OutputStream EMPTY_OUTPUT_STREAM = new OutputStream() { @Override public void write(int b) { } @Override public void write(byte[] b) { } @Override public void write(byte[] b, int off, int len) { } }; /** * Redacted value. */ protected static final String REDACTED_VALUE = "REDACTED"; private static final ClientLogger LOGGER = new ClientLogger(ResourceManagerTestProxyTestBase.class); private AzureProfile testProfile; private boolean isSkipInPlayback; private final List<TestProxySanitizer> sanitizers = new ArrayList<>(); /** * Sets upper bound execution timeout for each @Test method.
* {@link org.junit.jupiter.api.Timeout} annotation on test methods will only narrow the timeout, not affecting the upper * bound. */ @RegisterExtension final PlaybackTimeoutInterceptor playbackTimeoutInterceptor = new PlaybackTimeoutInterceptor(() -> Duration.ofSeconds(60)); /** * Initializes ResourceManagerTestProxyTestBase class. */ protected ResourceManagerTestProxyTestBase() { } /** * Generates a random resource name. * * @param prefix Prefix for the resource name. * @param maxLen Maximum length of the resource name. * @return A randomly generated resource name with a given prefix and maximum length. */ protected String generateRandomResourceName(String prefix, int maxLen) { return testResourceNamer.randomName(prefix, maxLen); } /** * Generates a random UUID. * @return A randomly generated UUID. */ protected String generateRandomUuid() { return testResourceNamer.randomUuid(); } /** * Generates a random password. * @return random password */ public static String password() { String password = new ResourceNamer("").randomName("Pa5$", 12); LOGGER.info("Password: {}", password); return password; } private static String sshPublicKey; /** * Generates an SSH public key.
* @return an SSH public key */ public static String sshPublicKey() { if (sshPublicKey == null) { try { KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA"); keyGen.initialize(1024); KeyPair pair = keyGen.generateKeyPair(); PublicKey publicKey = pair.getPublic(); RSAPublicKey rsaPublicKey = (RSAPublicKey) publicKey; ByteArrayOutputStream byteOs = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(byteOs); dos.writeInt("ssh-rsa".getBytes(StandardCharsets.US_ASCII).length); dos.write("ssh-rsa".getBytes(StandardCharsets.US_ASCII)); dos.writeInt(rsaPublicKey.getPublicExponent().toByteArray().length); dos.write(rsaPublicKey.getPublicExponent().toByteArray()); dos.writeInt(rsaPublicKey.getModulus().toByteArray().length); dos.write(rsaPublicKey.getModulus().toByteArray()); String publicKeyEncoded = new String(Base64.getEncoder().encode(byteOs.toByteArray()), StandardCharsets.US_ASCII); sshPublicKey = "ssh-rsa " + publicKeyEncoded; } catch (NoSuchAlgorithmException | IOException e) { throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e)); } } return sshPublicKey; } private static final Pattern SUBSCRIPTION_ID_PATTERN = Pattern.compile(SUBSCRIPTION_ID_REGEX); /** * Asserts that the resource ID is same. * * @param expected the expected resource ID. * @param actual the actual resource ID. */ protected void assertResourceIdEquals(String expected, String actual) { String sanitizedExpected = SUBSCRIPTION_ID_PATTERN.matcher(expected).replaceAll(ZERO_UUID); String sanitizedActual = SUBSCRIPTION_ID_PATTERN.matcher(actual).replaceAll(ZERO_UUID); Assertions.assertTrue(sanitizedExpected.equalsIgnoreCase(sanitizedActual), String.format("expected: %s but was: %s", expected, actual)); } /** * Loads a client ID from file. * * @return A client ID loaded from a file.
*/ protected String clientIdFromFile() { String clientId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_CLIENT_ID); return testResourceNamer.recordValueFromConfig(clientId); } /** * Return current Azure CLI signed-in user's userPrincipalName. * * @return current Azure CLI signed-in user. */ private static String getSafeWorkingDirectory() { if (IdentityUtil.isWindowsPlatform()) { String windowsSystemRoot = System.getenv("SystemRoot"); return CoreUtils.isNullOrEmpty(windowsSystemRoot) ? null : windowsSystemRoot + "\\system32"; } else { return "/bin/"; } } /** * Gets the test profile. * @return The test profile. */ protected AzureProfile profile() { return testProfile; } /** * Checks whether test mode is {@link TestMode * @return Whether the test mode is {@link TestMode */ protected boolean isPlaybackMode() { return getTestMode() == TestMode.PLAYBACK; } /** * Checks whether test should be skipped in playback. * @return Whether the test should be skipped in playback. */ protected boolean skipInPlayback() { if (isPlaybackMode()) { isSkipInPlayback = true; } return isSkipInPlayback; } @Override protected void beforeTest() { TokenCredential credential; HttpPipeline httpPipeline; String logLevel = Configuration.getGlobalConfiguration().get(AZURE_TEST_LOG_LEVEL); HttpLogDetailLevel httpLogDetailLevel; try { httpLogDetailLevel = HttpLogDetailLevel.valueOf(logLevel); } catch (Exception e) { if (isPlaybackMode()) { httpLogDetailLevel = HttpLogDetailLevel.NONE; LOGGER.error("Environment variable '{}' has not been set yet. Using 'NONE' for PLAYBACK.", AZURE_TEST_LOG_LEVEL); } else { httpLogDetailLevel = HttpLogDetailLevel.BODY_AND_HEADERS; LOGGER.error("Environment variable '{}' has not been set yet.
Using 'BODY_AND_HEADERS' for RECORD/LIVE.", AZURE_TEST_LOG_LEVEL); } } if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { try { System.setOut(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name())); System.setErr(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name())); } catch (UnsupportedEncodingException e) { } } if (isPlaybackMode()) { testProfile = PLAYBACK_PROFILE; List<HttpPipelinePolicy> policies = new ArrayList<>(); httpPipeline = buildHttpPipeline( new MockTokenCredential(), testProfile, new HttpLogOptions().setLogLevel(httpLogDetailLevel), policies, interceptorManager.getPlaybackClient()); if (!testContextManager.doNotRecordTest()) { interceptorManager.addMatchers(Collections.singletonList(new CustomMatcher().setIgnoredQueryParameters(Arrays.asList("api-version")).setExcludedHeaders(Arrays.asList("If-Match")))); addSanitizers(); removeSanitizers(); } } else { Configuration configuration = Configuration.getGlobalConfiguration(); String tenantId = Objects.requireNonNull( configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID), "'AZURE_TENANT_ID' environment variable cannot be null."); String subscriptionId = Objects.requireNonNull( configuration.get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID), "'AZURE_SUBSCRIPTION_ID' environment variable cannot be null."); credential = new DefaultAzureCredentialBuilder() .authorityHost(AzureEnvironment.AZURE.getActiveDirectoryEndpoint()) .build(); testProfile = new AzureProfile(tenantId, subscriptionId, AzureEnvironment.AZURE); List<HttpPipelinePolicy> policies = new ArrayList<>(); if (interceptorManager.isRecordMode() && !testContextManager.doNotRecordTest()) { policies.add(this.interceptorManager.getRecordPolicy()); addSanitizers(); removeSanitizers(); } if (httpLogDetailLevel == HttpLogDetailLevel.BODY_AND_HEADERS) { policies.add(new HttpDebugLoggingPolicy()); httpLogDetailLevel = HttpLogDetailLevel.NONE; } httpPipeline = buildHttpPipeline( credential, testProfile, new
HttpLogOptions().setLogLevel(httpLogDetailLevel), policies, generateHttpClientWithProxy(null, null)); } initializeClients(httpPipeline, testProfile); } /** * Generates an {@link HttpClient} with a proxy. * * @param clientBuilder The HttpClient builder. * @param proxyOptions The proxy. * @return An HttpClient with a proxy. */ protected HttpClient generateHttpClientWithProxy(NettyAsyncHttpClientBuilder clientBuilder, ProxyOptions proxyOptions) { if (clientBuilder == null) { clientBuilder = new NettyAsyncHttpClientBuilder(); } if (proxyOptions != null) { clientBuilder.proxy(proxyOptions); } else { try { System.setProperty(USE_SYSTEM_PROXY, VALUE_TRUE); List<Proxy> proxies = ProxySelector.getDefault().select(new URI(AzureEnvironment.AZURE.getResourceManagerEndpoint())); if (!proxies.isEmpty()) { for (Proxy proxy : proxies) { if (proxy.address() instanceof InetSocketAddress) { String host = ((InetSocketAddress) proxy.address()).getHostName(); int port = ((InetSocketAddress) proxy.address()).getPort(); switch (proxy.type()) { case HTTP: return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))).build(); case SOCKS: return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.SOCKS5, new InetSocketAddress(host, port))).build(); default: } } } } String host = null; int port = 0; if (System.getProperty(HTTPS_PROXY_HOST) != null && System.getProperty(HTTPS_PROXY_PORT) != null) { host = System.getProperty(HTTPS_PROXY_HOST); port = Integer.parseInt(System.getProperty(HTTPS_PROXY_PORT)); } else if (System.getProperty(HTTP_PROXY_HOST) != null && System.getProperty(HTTP_PROXY_PORT) != null) { host = System.getProperty(HTTP_PROXY_HOST); port = Integer.parseInt(System.getProperty(HTTP_PROXY_PORT)); } if (host != null) { clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))); } } catch (URISyntaxException ignored) { } } return clientBuilder.build(); } @Override protected void afterTest() { if
(!isSkipInPlayback) { cleanUpResources(); } } /** * Sets sdk context when running the tests * * @param internalContext the internal runtime context * @param objects the manager classes to change internal context * @param <T> the type of internal context * @throws RuntimeException when field cannot be found or set. */ protected <T> void setInternalContext(T internalContext, Object... objects) { try { for (Object obj : objects) { for (final Field field : obj.getClass().getSuperclass().getDeclaredFields()) { if (field.getName().equals("resourceManager")) { setAccessible(field); Field context = field.get(obj).getClass().getDeclaredField("internalContext"); setAccessible(context); context.set(field.get(obj), internalContext); } } for (Field field : obj.getClass().getDeclaredFields()) { if (field.getName().equals("internalContext")) { setAccessible(field); field.set(obj, internalContext); } else if (field.getName().contains("Manager")) { setAccessible(field); setInternalContext(internalContext, field.get(obj)); } } } } catch (IllegalAccessException | NoSuchFieldException ex) { throw LOGGER.logExceptionAsError(new RuntimeException(ex)); } } private void setAccessible(final AccessibleObject accessibleObject) { Runnable runnable = () -> accessibleObject.setAccessible(true); runnable.run(); } /** * Builds the manager with provided http pipeline and profile in general manner. * * @param manager the class of the manager * @param httpPipeline the http pipeline * @param profile the azure profile * @param <T> the type of the manager * @return the manager instance * @throws RuntimeException when field cannot be found or set.
*/ protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) { try { Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass()); setAccessible(constructor); return constructor.newInstance(httpPipeline, profile); } catch (ReflectiveOperationException ex) { throw LOGGER.logExceptionAsError(new RuntimeException(ex)); } } /** * Builds an HttpPipeline. * * @param credential The credentials to use in the pipeline. * @param profile The AzureProfile to use in the pipeline. * @param httpLogOptions The HTTP logging options to use in the pipeline. * @param policies Additional policies to use in the pipeline. * @param httpClient The HttpClient to use in the pipeline. * @return A new constructed HttpPipeline. */ protected abstract HttpPipeline buildHttpPipeline( TokenCredential credential, AzureProfile profile, HttpLogOptions httpLogOptions, List<HttpPipelinePolicy> policies, HttpClient httpClient); /** * Initializes service clients used in testing. * * @param httpPipeline The HttpPipeline to use in the clients. * @param profile The AzureProfile to use in the clients. */ protected abstract void initializeClients(HttpPipeline httpPipeline, AzureProfile profile); /** * Cleans up resources.
*/ protected abstract void cleanUpResources(); private void addSanitizers() { List<TestProxySanitizer> sanitizers = new ArrayList<>(Arrays.asList( new TestProxySanitizer(SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.URL), new TestProxySanitizer("(?<=%2Fsubscriptions%2F)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL), new TestProxySanitizer("Retry-After", null, "0", TestProxySanitizerType.HEADER), new TestProxySanitizer("$..id", SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secretText", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..keys[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..adminPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..Password", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..accessSAS", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$.properties.osProfile.customData", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..administratorLoginPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..hubDatabasePassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..aliasPrimaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..aliasSecondaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..primaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secondaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..primaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secondaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new
TestProxySanitizer("$..primaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secondaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..passwords[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secret", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$.properties.siteConfig.machineKey.decryptionKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("(?:AccountKey=)(?<accountKey>.*?)(?:;)", REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("accountKey"), new TestProxySanitizer("$.properties.WEBSITE_AUTH_ENCRYPTION_KEY", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$.properties.DOCKER_REGISTRY_SERVER_PASSWORD", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY) )); sanitizers.addAll(this.sanitizers); interceptorManager.addSanitizers(sanitizers); } private void removeSanitizers() { interceptorManager.removeSanitizers("AZSDK2003", "AZSDK2030", "AZSDK3430", "AZSDK3493"); } /** * Adds test proxy sanitizers. * <p> * Recommend to call this API in subclass constructor. * * @param sanitizers the test proxy sanitizers. */ protected void addSanitizers(TestProxySanitizer...
sanitizers) { this.sanitizers.addAll(Arrays.asList(sanitizers)); } private final class PlaybackTimeoutInterceptor implements InvocationInterceptor { private final Duration duration; private PlaybackTimeoutInterceptor(Supplier<Duration> timeoutSupplier) { Objects.requireNonNull(timeoutSupplier); this.duration = timeoutSupplier.get(); } @Override public void interceptTestMethod(Invocation<Void> invocation, ReflectiveInvocationContext<Method> invocationContext, ExtensionContext extensionContext) throws Throwable { if (isPlaybackMode()) { Assertions.assertTimeoutPreemptively(duration, invocation::proceed); } else { invocation.proceed(); } } } }
class ResourceManagerTestProxyTestBase extends TestProxyTestBase { private static final String ZERO_UUID = "00000000-0000-0000-0000-000000000000"; private static final String SUBSCRIPTION_ID_REGEX = "(?<=/subscriptions/)([^/?]+)"; private static final String ZERO_SUBSCRIPTION = ZERO_UUID; private static final String ZERO_TENANT = ZERO_UUID; private static final String PLAYBACK_URI_BASE = "https: private static final String AZURE_AUTH_LOCATION = "AZURE_AUTH_LOCATION"; private static final String AZURE_TEST_LOG_LEVEL = "AZURE_TEST_LOG_LEVEL"; private static final String HTTPS_PROXY_HOST = "https.proxyHost"; private static final String HTTPS_PROXY_PORT = "https.proxyPort"; private static final String HTTP_PROXY_HOST = "http.proxyHost"; private static final String HTTP_PROXY_PORT = "http.proxyPort"; private static final String USE_SYSTEM_PROXY = "java.net.useSystemProxies"; private static final String VALUE_TRUE = "true"; private static final String PLAYBACK_URI = PLAYBACK_URI_BASE + "1234"; private static final AzureProfile PLAYBACK_PROFILE = new AzureProfile( ZERO_TENANT, ZERO_SUBSCRIPTION, new AzureEnvironment(Arrays.stream(AzureEnvironment.Endpoint.values()) .collect(Collectors.toMap(AzureEnvironment.Endpoint::identifier, endpoint -> PLAYBACK_URI))) ); private static final OutputStream EMPTY_OUTPUT_STREAM = new OutputStream() { @Override public void write(int b) { } @Override public void write(byte[] b) { } @Override public void write(byte[] b, int off, int len) { } }; /** * Redacted value. */ protected static final String REDACTED_VALUE = "REDACTED"; private static final ClientLogger LOGGER = new ClientLogger(ResourceManagerTestProxyTestBase.class); private AzureProfile testProfile; private boolean isSkipInPlayback; private final List<TestProxySanitizer> sanitizers = new ArrayList<>(); /** * Sets upper bound execution timeout for each @Test method. 
* {@link org.junit.jupiter.api.Timeout} annotation on test methods will only narrow the timeout, not affecting the upper * bound. */ @RegisterExtension final PlaybackTimeoutInterceptor playbackTimeoutInterceptor = new PlaybackTimeoutInterceptor(() -> Duration.ofSeconds(60)); /** * Initializes ResourceManagerTestProxyTestBase class. */ protected ResourceManagerTestProxyTestBase() { } /** * Generates a random resource name. * * @param prefix Prefix for the resource name. * @param maxLen Maximum length of the resource name. * @return A randomly generated resource name with a given prefix and maximum length. */ protected String generateRandomResourceName(String prefix, int maxLen) { return testResourceNamer.randomName(prefix, maxLen); } /** * Generates a random UUID. * @return A randomly generated UUID. */ protected String generateRandomUuid() { return testResourceNamer.randomUuid(); } /** * Generates a random password. * @return random password */ public static String password() { String password = new ResourceNamer("").randomName("Pa5$", 12); LOGGER.info("Password: {}", password); return password; } private static String sshPublicKey; /** * Generates an SSH public key. 
* @return an SSH public key */ public static String sshPublicKey() { if (sshPublicKey == null) { try { KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA"); keyGen.initialize(1024); KeyPair pair = keyGen.generateKeyPair(); PublicKey publicKey = pair.getPublic(); RSAPublicKey rsaPublicKey = (RSAPublicKey) publicKey; ByteArrayOutputStream byteOs = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(byteOs); dos.writeInt("ssh-rsa".getBytes(StandardCharsets.US_ASCII).length); dos.write("ssh-rsa".getBytes(StandardCharsets.US_ASCII)); dos.writeInt(rsaPublicKey.getPublicExponent().toByteArray().length); dos.write(rsaPublicKey.getPublicExponent().toByteArray()); dos.writeInt(rsaPublicKey.getModulus().toByteArray().length); dos.write(rsaPublicKey.getModulus().toByteArray()); String publicKeyEncoded = new String(Base64.getEncoder().encode(byteOs.toByteArray()), StandardCharsets.US_ASCII); sshPublicKey = "ssh-rsa " + publicKeyEncoded; } catch (NoSuchAlgorithmException | IOException e) { throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e)); } } return sshPublicKey; } private static final Pattern SUBSCRIPTION_ID_PATTERN = Pattern.compile(SUBSCRIPTION_ID_REGEX); /** * Asserts that the resource ID is same. * * @param expected the expected resource ID. * @param actual the actual resource ID. */ protected void assertResourceIdEquals(String expected, String actual) { String sanitizedExpected = SUBSCRIPTION_ID_PATTERN.matcher(expected).replaceAll(ZERO_UUID); String sanitizedActual = SUBSCRIPTION_ID_PATTERN.matcher(actual).replaceAll(ZERO_UUID); Assertions.assertTrue(sanitizedExpected.equalsIgnoreCase(sanitizedActual), String.format("expected: %s but was: %s", expected, actual)); } /** * Loads a client ID from file. * * @return A client ID loaded from a file. 
*/ protected String clientIdFromFile() { String clientId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_CLIENT_ID); return testResourceNamer.recordValueFromConfig(clientId); } /** * Return current Azure CLI signed-in user's userPrincipalName. * * @return current Azure CLI signed-in user. */ private static String getSafeWorkingDirectory() { if (IdentityUtil.isWindowsPlatform()) { String windowsSystemRoot = System.getenv("SystemRoot"); return CoreUtils.isNullOrEmpty(windowsSystemRoot) ? null : windowsSystemRoot + "\\system32"; } else { return "/bin/"; } } /** * Gets the test profile. * @return The test profile. */ protected AzureProfile profile() { return testProfile; } /** * Checks whether test mode is {@link TestMode * @return Whether the test mode is {@link TestMode */ protected boolean isPlaybackMode() { return getTestMode() == TestMode.PLAYBACK; } /** * Checks whether test should be skipped in playback. * @return Whether the test should be skipped in playback. */ protected boolean skipInPlayback() { if (isPlaybackMode()) { isSkipInPlayback = true; } return isSkipInPlayback; } @Override protected void beforeTest() { TokenCredential credential; HttpPipeline httpPipeline; String logLevel = Configuration.getGlobalConfiguration().get(AZURE_TEST_LOG_LEVEL); HttpLogDetailLevel httpLogDetailLevel; try { httpLogDetailLevel = HttpLogDetailLevel.valueOf(logLevel); } catch (Exception e) { if (isPlaybackMode()) { httpLogDetailLevel = HttpLogDetailLevel.NONE; LOGGER.error("Environment variable '{}' has not been set yet. Using 'NONE' for PLAYBACK.", AZURE_TEST_LOG_LEVEL); } else { httpLogDetailLevel = HttpLogDetailLevel.BODY_AND_HEADERS; LOGGER.error("Environment variable '{}' has not been set yet. 
Using 'BODY_AND_HEADERS' for RECORD/LIVE.", AZURE_TEST_LOG_LEVEL); } } if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { try { System.setOut(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name())); System.setErr(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name())); } catch (UnsupportedEncodingException e) { } } if (isPlaybackMode()) { testProfile = PLAYBACK_PROFILE; List<HttpPipelinePolicy> policies = new ArrayList<>(); httpPipeline = buildHttpPipeline( new MockTokenCredential(), testProfile, new HttpLogOptions().setLogLevel(httpLogDetailLevel), policies, interceptorManager.getPlaybackClient()); if (!testContextManager.doNotRecordTest()) { interceptorManager.addMatchers(Collections.singletonList(new CustomMatcher().setIgnoredQueryParameters(Arrays.asList("api-version")).setExcludedHeaders(Arrays.asList("If-Match")))); addSanitizers(); removeSanitizers(); } } else { Configuration configuration = Configuration.getGlobalConfiguration(); String tenantId = Objects.requireNonNull( configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID), "'AZURE_TENANT_ID' environment variable cannot be null."); String subscriptionId = Objects.requireNonNull( configuration.get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID), "'AZURE_SUBSCRIPTION_ID' environment variable cannot be null."); credential = new DefaultAzureCredentialBuilder() .authorityHost(AzureEnvironment.AZURE.getActiveDirectoryEndpoint()) .build(); testProfile = new AzureProfile(tenantId, subscriptionId, AzureEnvironment.AZURE); List<HttpPipelinePolicy> policies = new ArrayList<>(); if (interceptorManager.isRecordMode() && !testContextManager.doNotRecordTest()) { policies.add(this.interceptorManager.getRecordPolicy()); addSanitizers(); removeSanitizers(); } if (httpLogDetailLevel == HttpLogDetailLevel.BODY_AND_HEADERS) { policies.add(new HttpDebugLoggingPolicy()); httpLogDetailLevel = HttpLogDetailLevel.NONE; } httpPipeline = buildHttpPipeline( credential, testProfile, new 
HttpLogOptions().setLogLevel(httpLogDetailLevel), policies, generateHttpClientWithProxy(null, null)); } initializeClients(httpPipeline, testProfile); } /** * Generates an {@link HttpClient} with a proxy. * * @param clientBuilder The HttpClient builder. * @param proxyOptions The proxy. * @return An HttpClient with a proxy. */ protected HttpClient generateHttpClientWithProxy(NettyAsyncHttpClientBuilder clientBuilder, ProxyOptions proxyOptions) { if (clientBuilder == null) { clientBuilder = new NettyAsyncHttpClientBuilder(); } if (proxyOptions != null) { clientBuilder.proxy(proxyOptions); } else { try { System.setProperty(USE_SYSTEM_PROXY, VALUE_TRUE); List<Proxy> proxies = ProxySelector.getDefault().select(new URI(AzureEnvironment.AZURE.getResourceManagerEndpoint())); if (!proxies.isEmpty()) { for (Proxy proxy : proxies) { if (proxy.address() instanceof InetSocketAddress) { String host = ((InetSocketAddress) proxy.address()).getHostName(); int port = ((InetSocketAddress) proxy.address()).getPort(); switch (proxy.type()) { case HTTP: return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))).build(); case SOCKS: return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.SOCKS5, new InetSocketAddress(host, port))).build(); default: } } } } String host = null; int port = 0; if (System.getProperty(HTTPS_PROXY_HOST) != null && System.getProperty(HTTPS_PROXY_PORT) != null) { host = System.getProperty(HTTPS_PROXY_HOST); port = Integer.parseInt(System.getProperty(HTTPS_PROXY_PORT)); } else if (System.getProperty(HTTP_PROXY_HOST) != null && System.getProperty(HTTP_PROXY_PORT) != null) { host = System.getProperty(HTTP_PROXY_HOST); port = Integer.parseInt(System.getProperty(HTTP_PROXY_PORT)); } if (host != null) { clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))); } } catch (URISyntaxException ignored) { } } return clientBuilder.build(); } @Override protected void afterTest() { if 
(!isSkipInPlayback) { cleanUpResources(); } } /** * Sets sdk context when running the tests * * @param internalContext the internal runtime context * @param objects the manager classes to change internal context * @param <T> the type of internal context * @throws RuntimeException when field cannot be found or set. */ protected <T> void setInternalContext(T internalContext, Object... objects) { try { for (Object obj : objects) { for (final Field field : obj.getClass().getSuperclass().getDeclaredFields()) { if (field.getName().equals("resourceManager")) { setAccessible(field); Field context = field.get(obj).getClass().getDeclaredField("internalContext"); setAccessible(context); context.set(field.get(obj), internalContext); } } for (Field field : obj.getClass().getDeclaredFields()) { if (field.getName().equals("internalContext")) { setAccessible(field); field.set(obj, internalContext); } else if (field.getName().contains("Manager")) { setAccessible(field); setInternalContext(internalContext, field.get(obj)); } } } } catch (IllegalAccessException | NoSuchFieldException ex) { throw LOGGER.logExceptionAsError(new RuntimeException(ex)); } } private void setAccessible(final AccessibleObject accessibleObject) { Runnable runnable = () -> accessibleObject.setAccessible(true); runnable.run(); } /** * Builds the manager with provided http pipeline and profile in general manner. * * @param manager the class of the manager * @param httpPipeline the http pipeline * @param profile the azure profile * @param <T> the type of the manager * @return the manager instance * @throws RuntimeException when field cannot be found or set. 
*/ protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) { try { Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass()); setAccessible(constructor); return constructor.newInstance(httpPipeline, profile); } catch (ReflectiveOperationException ex) { throw LOGGER.logExceptionAsError(new RuntimeException(ex)); } } /** * Builds an HttpPipeline. * * @param credential The credentials to use in the pipeline. * @param profile The AzureProfile to use in the pipeline. * @param httpLogOptions The HTTP logging options to use in the pipeline. * @param policies Additional policies to use in the pipeline. * @param httpClient The HttpClient to use in the pipeline. * @return A new constructed HttpPipeline. */ protected abstract HttpPipeline buildHttpPipeline( TokenCredential credential, AzureProfile profile, HttpLogOptions httpLogOptions, List<HttpPipelinePolicy> policies, HttpClient httpClient); /** * Initializes service clients used in testing. * * @param httpPipeline The HttpPipeline to use in the clients. * @param profile The AzureProfile to use in the clients. */ protected abstract void initializeClients(HttpPipeline httpPipeline, AzureProfile profile); /** * Cleans up resources. 
*/ protected abstract void cleanUpResources(); private void addSanitizers() { List<TestProxySanitizer> sanitizers = new ArrayList<>(Arrays.asList( new TestProxySanitizer(SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.URL), new TestProxySanitizer("(?<=%2Fsubscriptions%2F)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL), new TestProxySanitizer("Retry-After", null, "0", TestProxySanitizerType.HEADER), new TestProxySanitizer("$..id", SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secretText", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..keys[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..adminPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..Password", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..accessSAS", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$.properties.osProfile.customData", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..administratorLoginPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..hubDatabasePassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..aliasPrimaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..aliasSecondaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..primaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secondaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..primaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secondaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new 
TestProxySanitizer("$..primaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secondaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..passwords[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$..secret", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$.properties.siteConfig.machineKey.decryptionKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("(?:AccountKey=)(?<accountKey>.*?)(?:;)", REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("accountKey"), new TestProxySanitizer("$.properties.WEBSITE_AUTH_ENCRYPTION_KEY", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), new TestProxySanitizer("$.properties.DOCKER_REGISTRY_SERVER_PASSWORD", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY) )); sanitizers.addAll(this.sanitizers); interceptorManager.addSanitizers(sanitizers); } private void removeSanitizers() { interceptorManager.removeSanitizers("AZSDK2003", "AZSDK2030", "AZSDK3430", "AZSDK3493"); } /** * Adds test proxy sanitizers. * <p> * Recommend to call this API in subclass constructor. * * @param sanitizers the test proxy sanitizers. */ protected void addSanitizers(TestProxySanitizer... 
sanitizers) { this.sanitizers.addAll(Arrays.asList(sanitizers)); } private final class PlaybackTimeoutInterceptor implements InvocationInterceptor { private final Duration duration; private PlaybackTimeoutInterceptor(Supplier<Duration> timeoutSupplier) { Objects.requireNonNull(timeoutSupplier); this.duration = timeoutSupplier.get(); } @Override public void interceptTestMethod(Invocation<Void> invocation, ReflectiveInvocationContext<Method> invocationContext, ExtensionContext extensionContext) throws Throwable { if (isPlaybackMode()) { Assertions.assertTimeoutPreemptively(duration, invocation::proceed); } else { invocation.proceed(); } } } }
CI/IDE didn't report issue. Guess it's fine.
protected AzureUser azureCliSignedInUser() { AzureUser azureCliUser = new AzureUser(testResourceNamer); if (!isPlaybackMode()) { String azCommand = "az ad signed-in-user show --output json"; final Pattern windowsProcessErrorMessage = Pattern.compile("'azd?' is not recognized"); final Pattern shProcessErrorMessage = Pattern.compile("azd?:.*not found"); try { String starter; String switcher; if (IdentityUtil.isWindowsPlatform()) { starter = "cmd.exe"; switcher = "/c"; } else { starter = "/bin/sh"; switcher = "-c"; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); builder.redirectInput(ProcessBuilder.Redirect.from(IdentityUtil.NULL_FILE)); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (windowsProcessErrorMessage.matcher(line).find() || shProcessErrorMessage.matcher(line).find()) { throw LOGGER.logExceptionAsError(new RuntimeException("AzureCliCredential authentication unavailable. Azure CLI not installed." 
+ "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { if (processOutput.contains("az login") || processOutput.contains("az account set")) { throw LOGGER.logExceptionAsError(new RuntimeException("AzureCliCredential authentication unavailable. Azure CLI not installed." + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException("get Azure CLI current signed-in user failed", null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Get Azure CLI signed-in user => A response was received from Azure CLI, deserializing the" + " response into an signed-in user."); try (JsonReader reader = JsonProviders.createReader(processOutput)) { Map<String, Object> signedInUserInfo = reader.readMap(JsonReader::readUntyped); String userPrincipalName = (String) signedInUserInfo.get("userPrincipalName"); String id = (String) signedInUserInfo.get("id"); azureCliUser = new AzureUser(testResourceNamer, id, userPrincipalName); } } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(Exceptions.propagate(e)); } } return azureCliUser; }
String userPrincipalName = (String) signedInUserInfo.get("userPrincipalName");
protected AzureUser azureCliSignedInUser() { AzureUser azureCliUser = new AzureUser(testResourceNamer); if (!isPlaybackMode()) { String azCommand = "az ad signed-in-user show --output json"; final Pattern windowsProcessErrorMessage = Pattern.compile("'azd?' is not recognized"); final Pattern shProcessErrorMessage = Pattern.compile("azd?:.*not found"); try { String starter; String switcher; if (IdentityUtil.isWindowsPlatform()) { starter = "cmd.exe"; switcher = "/c"; } else { starter = "/bin/sh"; switcher = "-c"; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); builder.redirectInput(ProcessBuilder.Redirect.from(IdentityUtil.NULL_FILE)); builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (windowsProcessErrorMessage.matcher(line).find() || shProcessErrorMessage.matcher(line).find()) { throw LOGGER.logExceptionAsError(new RuntimeException("AzureCliCredential authentication unavailable. Azure CLI not installed." + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(10, TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { if (processOutput.contains("az login") || processOutput.contains("az account set")) { throw LOGGER.logExceptionAsError(new RuntimeException("AzureCliCredential authentication unavailable. Azure CLI not installed." 
+ "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException("get Azure CLI current signed-in user failed", null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Get Azure CLI signed-in user => A response was received from Azure CLI, deserializing the" + " response into an signed-in user."); try (JsonReader reader = JsonProviders.createReader(processOutput)) { Map<String, Object> signedInUserInfo = reader.readMap(JsonReader::readUntyped); String userPrincipalName = (String) signedInUserInfo.get("userPrincipalName"); String id = (String) signedInUserInfo.get("id"); azureCliUser = new AzureUser(testResourceNamer, id, userPrincipalName); } } catch (IOException | InterruptedException e) { throw LOGGER.logExceptionAsError(Exceptions.propagate(e)); } } return azureCliUser; }
class ResourceManagerTestProxyTestBase extends TestProxyTestBase { private static final String ZERO_UUID = "00000000-0000-0000-0000-000000000000"; private static final String SUBSCRIPTION_ID_REGEX = "(?<=/subscriptions/)([^/?]+)"; private static final String ZERO_SUBSCRIPTION = ZERO_UUID; private static final String ZERO_TENANT = ZERO_UUID; private static final String PLAYBACK_URI_BASE = "https: private static final String AZURE_AUTH_LOCATION = "AZURE_AUTH_LOCATION"; private static final String AZURE_TEST_LOG_LEVEL = "AZURE_TEST_LOG_LEVEL"; private static final String HTTPS_PROXY_HOST = "https.proxyHost"; private static final String HTTPS_PROXY_PORT = "https.proxyPort"; private static final String HTTP_PROXY_HOST = "http.proxyHost"; private static final String HTTP_PROXY_PORT = "http.proxyPort"; private static final String USE_SYSTEM_PROXY = "java.net.useSystemProxies"; private static final String VALUE_TRUE = "true"; private static final String PLAYBACK_URI = PLAYBACK_URI_BASE + "1234"; private static final AzureProfile PLAYBACK_PROFILE = new AzureProfile( ZERO_TENANT, ZERO_SUBSCRIPTION, new AzureEnvironment(Arrays.stream(AzureEnvironment.Endpoint.values()) .collect(Collectors.toMap(AzureEnvironment.Endpoint::identifier, endpoint -> PLAYBACK_URI))) ); private static final OutputStream EMPTY_OUTPUT_STREAM = new OutputStream() { @Override public void write(int b) { } @Override public void write(byte[] b) { } @Override public void write(byte[] b, int off, int len) { } }; /** * Redacted value. */ protected static final String REDACTED_VALUE = "REDACTED"; private static final ClientLogger LOGGER = new ClientLogger(ResourceManagerTestProxyTestBase.class); private AzureProfile testProfile; private boolean isSkipInPlayback; private final List<TestProxySanitizer> sanitizers = new ArrayList<>(); /** * Sets upper bound execution timeout for each @Test method. 
* {@link org.junit.jupiter.api.Timeout} annotation on test methods will only narrow the timeout, not affecting the upper * bound. */ @RegisterExtension final PlaybackTimeoutInterceptor playbackTimeoutInterceptor = new PlaybackTimeoutInterceptor(() -> Duration.ofSeconds(60)); /** * Initializes ResourceManagerTestProxyTestBase class. */ protected ResourceManagerTestProxyTestBase() { } /** * Generates a random resource name. * * @param prefix Prefix for the resource name. * @param maxLen Maximum length of the resource name. * @return A randomly generated resource name with a given prefix and maximum length. */ protected String generateRandomResourceName(String prefix, int maxLen) { return testResourceNamer.randomName(prefix, maxLen); } /** * Generates a random UUID. * @return A randomly generated UUID. */ protected String generateRandomUuid() { return testResourceNamer.randomUuid(); } /** * Generates a random password. * @return random password */ public static String password() { String password = new ResourceNamer("").randomName("Pa5$", 12); LOGGER.info("Password: {}", password); return password; } private static String sshPublicKey; /** * Generates an SSH public key. 
* @return an SSH public key */ public static String sshPublicKey() { if (sshPublicKey == null) { try { KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA"); keyGen.initialize(1024); KeyPair pair = keyGen.generateKeyPair(); PublicKey publicKey = pair.getPublic(); RSAPublicKey rsaPublicKey = (RSAPublicKey) publicKey; ByteArrayOutputStream byteOs = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(byteOs); dos.writeInt("ssh-rsa".getBytes(StandardCharsets.US_ASCII).length); dos.write("ssh-rsa".getBytes(StandardCharsets.US_ASCII)); dos.writeInt(rsaPublicKey.getPublicExponent().toByteArray().length); dos.write(rsaPublicKey.getPublicExponent().toByteArray()); dos.writeInt(rsaPublicKey.getModulus().toByteArray().length); dos.write(rsaPublicKey.getModulus().toByteArray()); String publicKeyEncoded = new String(Base64.getEncoder().encode(byteOs.toByteArray()), StandardCharsets.US_ASCII); sshPublicKey = "ssh-rsa " + publicKeyEncoded; } catch (NoSuchAlgorithmException | IOException e) { throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e)); } } return sshPublicKey; } private static final Pattern SUBSCRIPTION_ID_PATTERN = Pattern.compile(SUBSCRIPTION_ID_REGEX); /** * Asserts that the resource ID is same. * * @param expected the expected resource ID. * @param actual the actual resource ID. */ protected void assertResourceIdEquals(String expected, String actual) { String sanitizedExpected = SUBSCRIPTION_ID_PATTERN.matcher(expected).replaceAll(ZERO_UUID); String sanitizedActual = SUBSCRIPTION_ID_PATTERN.matcher(actual).replaceAll(ZERO_UUID); Assertions.assertTrue(sanitizedExpected.equalsIgnoreCase(sanitizedActual), String.format("expected: %s but was: %s", expected, actual)); } /** * Loads a client ID from file. * * @return A client ID loaded from a file. 
*/
protected String clientIdFromFile() {
    // Client ID comes from the AZURE_CLIENT_ID configuration; recordValueFromConfig routes it
    // through the test resource namer so the value is recorded (and sanitized) consistently.
    String clientId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_CLIENT_ID);
    return testResourceNamer.recordValueFromConfig(clientId);
}

/**
 * Returns a directory that is safe to use as a working directory for external processes:
 * {@code %SystemRoot%\system32} on Windows (or {@code null} when {@code SystemRoot} is unset)
 * and {@code /bin/} on other platforms.
 * <p>
 * NOTE(review): the previous javadoc ("Return current Azure CLI signed-in user's
 * userPrincipalName") did not describe this method's body; corrected here.
 *
 * @return a safe working directory path, or {@code null} when it cannot be determined on Windows.
 */
private static String getSafeWorkingDirectory() {
    if (IdentityUtil.isWindowsPlatform()) {
        String windowsSystemRoot = System.getenv("SystemRoot");
        return CoreUtils.isNullOrEmpty(windowsSystemRoot) ? null : windowsSystemRoot + "\\system32";
    } else {
        return "/bin/";
    }
}

/**
 * Gets the test profile.
 *
 * @return The test profile.
 */
protected AzureProfile profile() {
    return testProfile;
}

/**
 * Checks whether the test mode is {@link TestMode#PLAYBACK}.
 *
 * @return Whether the test mode is {@link TestMode#PLAYBACK}.
 */
protected boolean isPlaybackMode() {
    return getTestMode() == TestMode.PLAYBACK;
}

/**
 * Checks whether the test should be skipped in playback.
 * Calling this in playback mode latches {@code isSkipInPlayback}, which also suppresses
 * {@link #cleanUpResources()} in {@code afterTest()}.
 *
 * @return Whether the test should be skipped in playback.
 */
protected boolean skipInPlayback() {
    if (isPlaybackMode()) {
        isSkipInPlayback = true;
    }
    return isSkipInPlayback;
}

@Override
protected void beforeTest() {
    TokenCredential credential;
    HttpPipeline httpPipeline;
    // Resolve the HTTP log level from AZURE_TEST_LOG_LEVEL; an unset/invalid value falls back to
    // NONE for playback and BODY_AND_HEADERS for record/live.
    String logLevel = Configuration.getGlobalConfiguration().get(AZURE_TEST_LOG_LEVEL);
    HttpLogDetailLevel httpLogDetailLevel;
    try {
        httpLogDetailLevel = HttpLogDetailLevel.valueOf(logLevel);
    } catch (Exception e) {
        if (isPlaybackMode()) {
            httpLogDetailLevel = HttpLogDetailLevel.NONE;
            LOGGER.error("Environment variable '{}' has not been set yet. Using 'NONE' for PLAYBACK.", AZURE_TEST_LOG_LEVEL);
        } else {
            httpLogDetailLevel = HttpLogDetailLevel.BODY_AND_HEADERS;
            LOGGER.error("Environment variable '{}' has not been set yet. Using 'BODY_AND_HEADERS' for RECORD/LIVE.", AZURE_TEST_LOG_LEVEL);
        }
    }
    // With logging fully disabled, redirect stdout/stderr into the no-op stream to keep CI logs quiet.
    if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
        try {
            System.setOut(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name()));
            System.setErr(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name()));
        } catch (UnsupportedEncodingException e) {
            // Default charset name is always supported; deliberately ignored.
        }
    }
    if (isPlaybackMode()) {
        // Playback: zeroed profile, mock credential, and the interceptor's playback HTTP client.
        testProfile = PLAYBACK_PROFILE;
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        httpPipeline = buildHttpPipeline(
            new MockTokenCredential(),
            testProfile,
            new HttpLogOptions().setLogLevel(httpLogDetailLevel),
            policies,
            interceptorManager.getPlaybackClient());
        if (!testContextManager.doNotRecordTest()) {
            // Loosen matching: api-version query values and If-Match headers differ between
            // record time and playback time.
            interceptorManager.addMatchers(Collections.singletonList(new CustomMatcher().setIgnoredQueryParameters(Arrays.asList("api-version")).setExcludedHeaders(Arrays.asList("If-Match"))));
            addSanitizers();
            removeSanitizers();
        }
    } else {
        // Record/live: real tenant/subscription from the environment plus DefaultAzureCredential.
        Configuration configuration = Configuration.getGlobalConfiguration();
        String tenantId = Objects.requireNonNull(
            configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID),
            "'AZURE_TENANT_ID' environment variable cannot be null.");
        String subscriptionId = Objects.requireNonNull(
            configuration.get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID),
            "'AZURE_SUBSCRIPTION_ID' environment variable cannot be null.");
        credential = new DefaultAzureCredentialBuilder()
            .authorityHost(AzureEnvironment.AZURE.getActiveDirectoryEndpoint())
            .build();
        testProfile = new AzureProfile(tenantId, subscriptionId, AzureEnvironment.AZURE);
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        if (interceptorManager.isRecordMode() && !testContextManager.doNotRecordTest()) {
            policies.add(this.interceptorManager.getRecordPolicy());
            addSanitizers();
            removeSanitizers();
        }
        if (httpLogDetailLevel == HttpLogDetailLevel.BODY_AND_HEADERS) {
            // HttpDebugLoggingPolicy takes over body/header logging, so the pipeline's own
            // logging is downgraded to NONE to avoid duplicate output.
            policies.add(new HttpDebugLoggingPolicy());
            httpLogDetailLevel = HttpLogDetailLevel.NONE;
        }
        httpPipeline = buildHttpPipeline(
            credential,
            testProfile,
            new HttpLogOptions().setLogLevel(httpLogDetailLevel),
            policies,
            generateHttpClientWithProxy(null, null));
    }
    initializeClients(httpPipeline, testProfile);
}

/**
 * Generates an {@link HttpClient} with a proxy.
 * Proxy resolution order: explicit {@code proxyOptions}; then the JVM's system
 * {@link ProxySelector} for the ARM endpoint; then https.proxyHost/Port; then http.proxyHost/Port.
 *
 * @param clientBuilder The HttpClient builder.
 * @param proxyOptions The proxy.
 * @return An HttpClient with a proxy.
 */
protected HttpClient generateHttpClientWithProxy(NettyAsyncHttpClientBuilder clientBuilder, ProxyOptions proxyOptions) {
    if (clientBuilder == null) {
        clientBuilder = new NettyAsyncHttpClientBuilder();
    }
    if (proxyOptions != null) {
        clientBuilder.proxy(proxyOptions);
    } else {
        try {
            System.setProperty(USE_SYSTEM_PROXY, VALUE_TRUE);
            List<Proxy> proxies = ProxySelector.getDefault().select(new URI(AzureEnvironment.AZURE.getResourceManagerEndpoint()));
            if (!proxies.isEmpty()) {
                for (Proxy proxy : proxies) {
                    if (proxy.address() instanceof InetSocketAddress) {
                        String host = ((InetSocketAddress) proxy.address()).getHostName();
                        int port = ((InetSocketAddress) proxy.address()).getPort();
                        switch (proxy.type()) {
                            case HTTP:
                                return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))).build();
                            case SOCKS:
                                return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.SOCKS5, new InetSocketAddress(host, port))).build();
                            default:
                        }
                    }
                }
            }
            // Fall back to the standard JVM proxy system properties (https.* preferred over http.*).
            String host = null;
            int port = 0;
            if (System.getProperty(HTTPS_PROXY_HOST) != null && System.getProperty(HTTPS_PROXY_PORT) != null) {
                host = System.getProperty(HTTPS_PROXY_HOST);
                port = Integer.parseInt(System.getProperty(HTTPS_PROXY_PORT));
            } else if (System.getProperty(HTTP_PROXY_HOST) != null && System.getProperty(HTTP_PROXY_PORT) != null) {
                host = System.getProperty(HTTP_PROXY_HOST);
                port = Integer.parseInt(System.getProperty(HTTP_PROXY_PORT));
            }
            if (host != null) {
                clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port)));
            }
        } catch (URISyntaxException ignored) {
            // Malformed endpoint URI: build the client without a proxy.
        }
    }
    return clientBuilder.build();
}

@Override
protected void afterTest() {
    // Skipped-in-playback tests created no resources, so there is nothing to clean up.
    if (!isSkipInPlayback) {
        cleanUpResources();
    }
}

/**
 * Sets sdk context when running the tests.
 * Walks the given manager objects reflectively: superclass "resourceManager" fields and declared
 * "internalContext" fields are rewired to {@code internalContext}; any field whose name contains
 * "Manager" is recursed into.
 *
 * @param internalContext the internal runtime context
 * @param objects the manager classes to change internal context
 * @param <T> the type of internal context
 * @throws RuntimeException when field cannot be found or set.
 */
protected <T> void setInternalContext(T internalContext, Object... objects) {
    try {
        for (Object obj : objects) {
            for (final Field field : obj.getClass().getSuperclass().getDeclaredFields()) {
                if (field.getName().equals("resourceManager")) {
                    setAccessible(field);
                    Field context = field.get(obj).getClass().getDeclaredField("internalContext");
                    setAccessible(context);
                    context.set(field.get(obj), internalContext);
                }
            }
            for (Field field : obj.getClass().getDeclaredFields()) {
                if (field.getName().equals("internalContext")) {
                    setAccessible(field);
                    field.set(obj, internalContext);
                } else if (field.getName().contains("Manager")) {
                    // Recurse into nested manager fields so the whole manager graph shares one context.
                    setAccessible(field);
                    setInternalContext(internalContext, field.get(obj));
                }
            }
        }
    } catch (IllegalAccessException | NoSuchFieldException ex) {
        throw LOGGER.logExceptionAsError(new RuntimeException(ex));
    }
}

// Wraps setAccessible(true); the Runnable indirection preserves the original call shape.
private void setAccessible(final AccessibleObject accessibleObject) {
    Runnable runnable = () -> accessibleObject.setAccessible(true);
    runnable.run();
}

/**
 * Builds the manager with provided http pipeline and profile in general manner.
 * Locates the (typically non-public) {@code (HttpPipeline, AzureProfile)} constructor reflectively.
 *
 * @param manager the class of the manager
 * @param httpPipeline the http pipeline
 * @param profile the azure profile
 * @param <T> the type of the manager
 * @return the manager instance
 * @throws RuntimeException when field cannot be found or set.
 */
protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) {
    try {
        Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass());
        setAccessible(constructor);
        return constructor.newInstance(httpPipeline, profile);
    } catch (ReflectiveOperationException ex) {
        throw LOGGER.logExceptionAsError(new RuntimeException(ex));
    }
}

/**
 * Builds an HttpPipeline.
 *
 * @param credential The credentials to use in the pipeline.
 * @param profile The AzureProfile to use in the pipeline.
 * @param httpLogOptions The HTTP logging options to use in the pipeline.
 * @param policies Additional policies to use in the pipeline.
 * @param httpClient The HttpClient to use in the pipeline.
 * @return A new constructed HttpPipeline.
 */
protected abstract HttpPipeline buildHttpPipeline(
    TokenCredential credential,
    AzureProfile profile,
    HttpLogOptions httpLogOptions,
    List<HttpPipelinePolicy> policies,
    HttpClient httpClient);

/**
 * Initializes service clients used in testing.
 *
 * @param httpPipeline The HttpPipeline to use in the clients.
 * @param profile The AzureProfile to use in the clients.
 */
protected abstract void initializeClients(HttpPipeline httpPipeline, AzureProfile profile);

/**
 * Cleans up resources.
 */
protected abstract void cleanUpResources();

// Registers the default resource-manager sanitizers (subscription IDs, credentials, keys,
// connection strings) plus any subclass-supplied sanitizers with the test proxy.
private void addSanitizers() {
    List<TestProxySanitizer> sanitizers = new ArrayList<>(Arrays.asList(
        new TestProxySanitizer(SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.URL),
        new TestProxySanitizer("(?<=%2Fsubscriptions%2F)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
        new TestProxySanitizer("Retry-After", null, "0", TestProxySanitizerType.HEADER),
        new TestProxySanitizer("$..id", SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..secretText", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..keys[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..adminPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..Password", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..accessSAS", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$.properties.osProfile.customData", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..administratorLoginPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..hubDatabasePassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..aliasPrimaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..aliasSecondaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..primaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..secondaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..primaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..secondaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..primaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..secondaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..passwords[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..secret", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$.properties.siteConfig.machineKey.decryptionKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("(?:AccountKey=)(?<accountKey>.*?)(?:;)", REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("accountKey"),
        new TestProxySanitizer("$.properties.WEBSITE_AUTH_ENCRYPTION_KEY", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$.properties.DOCKER_REGISTRY_SERVER_PASSWORD", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY)
    ));
    sanitizers.addAll(this.sanitizers);
    interceptorManager.addSanitizers(sanitizers);
}

// Opts out of these centrally-registered test-proxy sanitizers by ID.
private void removeSanitizers() {
    interceptorManager.removeSanitizers("AZSDK2003", "AZSDK2030", "AZSDK3430", "AZSDK3493");
}

/**
 * Adds test proxy sanitizers.
 * <p>
 * Recommend to call this API in subclass constructor.
 *
 * @param sanitizers the test proxy sanitizers.
 */
protected void addSanitizers(TestProxySanitizer... sanitizers) {
    this.sanitizers.addAll(Arrays.asList(sanitizers));
}

/**
 * JUnit 5 interceptor enforcing a preemptive upper-bound timeout on each test method,
 * but only when running in playback mode; record/live invocations proceed unbounded.
 */
private final class PlaybackTimeoutInterceptor implements InvocationInterceptor {

    private final Duration duration;

    private PlaybackTimeoutInterceptor(Supplier<Duration> timeoutSupplier) {
        Objects.requireNonNull(timeoutSupplier);
        this.duration = timeoutSupplier.get();
    }

    @Override
    public void interceptTestMethod(Invocation<Void> invocation, ReflectiveInvocationContext<Method> invocationContext, ExtensionContext extensionContext) throws Throwable {
        if (isPlaybackMode()) {
            // assertTimeoutPreemptively aborts the test body once the duration elapses.
            Assertions.assertTimeoutPreemptively(duration, invocation::proceed);
        } else {
            invocation.proceed();
        }
    }
}
}
/**
 * Base class for resource-manager tests running against the test proxy: wires up playback vs
 * record/live pipelines, sanitizers, and a playback-only per-test timeout.
 */
class ResourceManagerTestProxyTestBase extends TestProxyTestBase {

    private static final String ZERO_UUID = "00000000-0000-0000-0000-000000000000";
    private static final String SUBSCRIPTION_ID_REGEX = "(?<=/subscriptions/)([^/?]+)";
    private static final String ZERO_SUBSCRIPTION = ZERO_UUID;
    private static final String ZERO_TENANT = ZERO_UUID;
    // NOTE(review): this literal appears truncated by extraction (likely "https://localhost:") — verify against upstream.
    private static final String PLAYBACK_URI_BASE = "https:
    private static final String AZURE_AUTH_LOCATION = "AZURE_AUTH_LOCATION";
    private static final String AZURE_TEST_LOG_LEVEL = "AZURE_TEST_LOG_LEVEL";
    private static final String HTTPS_PROXY_HOST = "https.proxyHost";
    private static final String HTTPS_PROXY_PORT = "https.proxyPort";
    private static final String HTTP_PROXY_HOST = "http.proxyHost";
    private static final String HTTP_PROXY_PORT = "http.proxyPort";
    private static final String USE_SYSTEM_PROXY = "java.net.useSystemProxies";
    private static final String VALUE_TRUE = "true";
    private static final String PLAYBACK_URI = PLAYBACK_URI_BASE + "1234";

    // Playback profile: zeroed tenant/subscription with every Azure endpoint redirected to the proxy URI.
    private static final AzureProfile PLAYBACK_PROFILE = new AzureProfile(
        ZERO_TENANT,
        ZERO_SUBSCRIPTION,
        new AzureEnvironment(Arrays.stream(AzureEnvironment.Endpoint.values())
            .collect(Collectors.toMap(AzureEnvironment.Endpoint::identifier, endpoint -> PLAYBACK_URI)))
    );

    // No-op sink used to silence stdout/stderr when HTTP logging is disabled (see beforeTest).
    private static final OutputStream EMPTY_OUTPUT_STREAM = new OutputStream() {
        @Override
        public void write(int b) {
        }

        @Override
        public void write(byte[] b) {
        }

        @Override
        public void write(byte[] b, int off, int len) {
        }
    };

    /**
     * Redacted value.
     */
    protected static final String REDACTED_VALUE = "REDACTED";

    private static final ClientLogger LOGGER = new ClientLogger(ResourceManagerTestProxyTestBase.class);

    // Profile in effect for the current test (playback profile or real tenant/subscription).
    private AzureProfile testProfile;
    // Latched by skipInPlayback(); suppresses cleanUpResources() in afterTest().
    private boolean isSkipInPlayback;
    // Extra sanitizers contributed by subclasses via addSanitizers(TestProxySanitizer...).
    private final List<TestProxySanitizer> sanitizers = new ArrayList<>();

    /**
     * Sets upper bound execution timeout for each @Test method.
     * {@link org.junit.jupiter.api.Timeout} annotation on test methods will only narrow the timeout, not affecting the upper
     * bound.
     */
    @RegisterExtension
    final PlaybackTimeoutInterceptor playbackTimeoutInterceptor = new PlaybackTimeoutInterceptor(() -> Duration.ofSeconds(60));

    /**
     * Initializes ResourceManagerTestProxyTestBase class.
     */
    protected ResourceManagerTestProxyTestBase() {
    }

    /**
     * Generates a random resource name.
     *
     * @param prefix Prefix for the resource name.
     * @param maxLen Maximum length of the resource name.
     * @return A randomly generated resource name with a given prefix and maximum length.
     */
    protected String generateRandomResourceName(String prefix, int maxLen) {
        return testResourceNamer.randomName(prefix, maxLen);
    }

    /**
     * Generates a random UUID.
     *
     * @return A randomly generated UUID.
     */
    protected String generateRandomUuid() {
        return testResourceNamer.randomUuid();
    }

    /**
     * Generates a random password.
     *
     * @return random password
     */
    public static String password() {
        String password = new ResourceNamer("").randomName("Pa5$", 12);
        LOGGER.info("Password: {}", password);
        return password;
    }

    // Lazily-generated OpenSSH-format public key, cached for the whole test run.
    private static String sshPublicKey;

    /**
     * Generates an SSH public key.
     * Builds the OpenSSH wire format ("ssh-rsa" + exponent + modulus, length-prefixed) and
     * Base64-encodes it; the result is cached in {@link #sshPublicKey}.
     *
     * @return an SSH public key
     */
    public static String sshPublicKey() {
        if (sshPublicKey == null) {
            try {
                KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA");
                keyGen.initialize(1024);
                KeyPair pair = keyGen.generateKeyPair();
                PublicKey publicKey = pair.getPublic();
                RSAPublicKey rsaPublicKey = (RSAPublicKey) publicKey;
                ByteArrayOutputStream byteOs = new ByteArrayOutputStream();
                DataOutputStream dos = new DataOutputStream(byteOs);
                dos.writeInt("ssh-rsa".getBytes(StandardCharsets.US_ASCII).length);
                dos.write("ssh-rsa".getBytes(StandardCharsets.US_ASCII));
                dos.writeInt(rsaPublicKey.getPublicExponent().toByteArray().length);
                dos.write(rsaPublicKey.getPublicExponent().toByteArray());
                dos.writeInt(rsaPublicKey.getModulus().toByteArray().length);
                dos.write(rsaPublicKey.getModulus().toByteArray());
                String publicKeyEncoded = new String(Base64.getEncoder().encode(byteOs.toByteArray()), StandardCharsets.US_ASCII);
                sshPublicKey = "ssh-rsa " + publicKeyEncoded;
            } catch (NoSuchAlgorithmException | IOException e) {
                throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e));
            }
        }
        return sshPublicKey;
    }

    private static final Pattern SUBSCRIPTION_ID_PATTERN = Pattern.compile(SUBSCRIPTION_ID_REGEX);

    /**
     * Asserts that the resource ID is same.
     * Subscription IDs are normalized to the zero UUID on both sides before a case-insensitive compare,
     * so IDs recorded under different subscriptions still match.
     *
     * @param expected the expected resource ID.
     * @param actual the actual resource ID.
     */
    protected void assertResourceIdEquals(String expected, String actual) {
        String sanitizedExpected = SUBSCRIPTION_ID_PATTERN.matcher(expected).replaceAll(ZERO_UUID);
        String sanitizedActual = SUBSCRIPTION_ID_PATTERN.matcher(actual).replaceAll(ZERO_UUID);
        Assertions.assertTrue(sanitizedExpected.equalsIgnoreCase(sanitizedActual), String.format("expected: %s but was: %s", expected, actual));
    }

    /**
     * Loads a client ID from file.
     *
     * @return A client ID loaded from a file.
*/
protected String clientIdFromFile() {
    // Client ID comes from the AZURE_CLIENT_ID configuration; recordValueFromConfig routes it
    // through the test resource namer so the value is recorded (and sanitized) consistently.
    String clientId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_CLIENT_ID);
    return testResourceNamer.recordValueFromConfig(clientId);
}

/**
 * Returns a directory that is safe to use as a working directory for external processes:
 * {@code %SystemRoot%\system32} on Windows (or {@code null} when {@code SystemRoot} is unset)
 * and {@code /bin/} on other platforms.
 * <p>
 * NOTE(review): the previous javadoc ("Return current Azure CLI signed-in user's
 * userPrincipalName") did not describe this method's body; corrected here.
 *
 * @return a safe working directory path, or {@code null} when it cannot be determined on Windows.
 */
private static String getSafeWorkingDirectory() {
    if (IdentityUtil.isWindowsPlatform()) {
        String windowsSystemRoot = System.getenv("SystemRoot");
        return CoreUtils.isNullOrEmpty(windowsSystemRoot) ? null : windowsSystemRoot + "\\system32";
    } else {
        return "/bin/";
    }
}

/**
 * Gets the test profile.
 *
 * @return The test profile.
 */
protected AzureProfile profile() {
    return testProfile;
}

/**
 * Checks whether the test mode is {@link TestMode#PLAYBACK}.
 *
 * @return Whether the test mode is {@link TestMode#PLAYBACK}.
 */
protected boolean isPlaybackMode() {
    return getTestMode() == TestMode.PLAYBACK;
}

/**
 * Checks whether the test should be skipped in playback.
 * Calling this in playback mode latches {@code isSkipInPlayback}, which also suppresses
 * {@link #cleanUpResources()} in {@code afterTest()}.
 *
 * @return Whether the test should be skipped in playback.
 */
protected boolean skipInPlayback() {
    if (isPlaybackMode()) {
        isSkipInPlayback = true;
    }
    return isSkipInPlayback;
}

@Override
protected void beforeTest() {
    TokenCredential credential;
    HttpPipeline httpPipeline;
    // Resolve the HTTP log level from AZURE_TEST_LOG_LEVEL; an unset/invalid value falls back to
    // NONE for playback and BODY_AND_HEADERS for record/live.
    String logLevel = Configuration.getGlobalConfiguration().get(AZURE_TEST_LOG_LEVEL);
    HttpLogDetailLevel httpLogDetailLevel;
    try {
        httpLogDetailLevel = HttpLogDetailLevel.valueOf(logLevel);
    } catch (Exception e) {
        if (isPlaybackMode()) {
            httpLogDetailLevel = HttpLogDetailLevel.NONE;
            LOGGER.error("Environment variable '{}' has not been set yet. Using 'NONE' for PLAYBACK.", AZURE_TEST_LOG_LEVEL);
        } else {
            httpLogDetailLevel = HttpLogDetailLevel.BODY_AND_HEADERS;
            LOGGER.error("Environment variable '{}' has not been set yet. Using 'BODY_AND_HEADERS' for RECORD/LIVE.", AZURE_TEST_LOG_LEVEL);
        }
    }
    // With logging fully disabled, redirect stdout/stderr into the no-op stream to keep CI logs quiet.
    if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
        try {
            System.setOut(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name()));
            System.setErr(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name()));
        } catch (UnsupportedEncodingException e) {
            // Default charset name is always supported; deliberately ignored.
        }
    }
    if (isPlaybackMode()) {
        // Playback: zeroed profile, mock credential, and the interceptor's playback HTTP client.
        testProfile = PLAYBACK_PROFILE;
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        httpPipeline = buildHttpPipeline(
            new MockTokenCredential(),
            testProfile,
            new HttpLogOptions().setLogLevel(httpLogDetailLevel),
            policies,
            interceptorManager.getPlaybackClient());
        if (!testContextManager.doNotRecordTest()) {
            // Loosen matching: api-version query values and If-Match headers differ between
            // record time and playback time.
            interceptorManager.addMatchers(Collections.singletonList(new CustomMatcher().setIgnoredQueryParameters(Arrays.asList("api-version")).setExcludedHeaders(Arrays.asList("If-Match"))));
            addSanitizers();
            removeSanitizers();
        }
    } else {
        // Record/live: real tenant/subscription from the environment plus DefaultAzureCredential.
        Configuration configuration = Configuration.getGlobalConfiguration();
        String tenantId = Objects.requireNonNull(
            configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID),
            "'AZURE_TENANT_ID' environment variable cannot be null.");
        String subscriptionId = Objects.requireNonNull(
            configuration.get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID),
            "'AZURE_SUBSCRIPTION_ID' environment variable cannot be null.");
        credential = new DefaultAzureCredentialBuilder()
            .authorityHost(AzureEnvironment.AZURE.getActiveDirectoryEndpoint())
            .build();
        testProfile = new AzureProfile(tenantId, subscriptionId, AzureEnvironment.AZURE);
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        if (interceptorManager.isRecordMode() && !testContextManager.doNotRecordTest()) {
            policies.add(this.interceptorManager.getRecordPolicy());
            addSanitizers();
            removeSanitizers();
        }
        if (httpLogDetailLevel == HttpLogDetailLevel.BODY_AND_HEADERS) {
            // HttpDebugLoggingPolicy takes over body/header logging, so the pipeline's own
            // logging is downgraded to NONE to avoid duplicate output.
            policies.add(new HttpDebugLoggingPolicy());
            httpLogDetailLevel = HttpLogDetailLevel.NONE;
        }
        httpPipeline = buildHttpPipeline(
            credential,
            testProfile,
            new HttpLogOptions().setLogLevel(httpLogDetailLevel),
            policies,
            generateHttpClientWithProxy(null, null));
    }
    initializeClients(httpPipeline, testProfile);
}

/**
 * Generates an {@link HttpClient} with a proxy.
 * Proxy resolution order: explicit {@code proxyOptions}; then the JVM's system
 * {@link ProxySelector} for the ARM endpoint; then https.proxyHost/Port; then http.proxyHost/Port.
 *
 * @param clientBuilder The HttpClient builder.
 * @param proxyOptions The proxy.
 * @return An HttpClient with a proxy.
 */
protected HttpClient generateHttpClientWithProxy(NettyAsyncHttpClientBuilder clientBuilder, ProxyOptions proxyOptions) {
    if (clientBuilder == null) {
        clientBuilder = new NettyAsyncHttpClientBuilder();
    }
    if (proxyOptions != null) {
        clientBuilder.proxy(proxyOptions);
    } else {
        try {
            System.setProperty(USE_SYSTEM_PROXY, VALUE_TRUE);
            List<Proxy> proxies = ProxySelector.getDefault().select(new URI(AzureEnvironment.AZURE.getResourceManagerEndpoint()));
            if (!proxies.isEmpty()) {
                for (Proxy proxy : proxies) {
                    if (proxy.address() instanceof InetSocketAddress) {
                        String host = ((InetSocketAddress) proxy.address()).getHostName();
                        int port = ((InetSocketAddress) proxy.address()).getPort();
                        switch (proxy.type()) {
                            case HTTP:
                                return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))).build();
                            case SOCKS:
                                return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.SOCKS5, new InetSocketAddress(host, port))).build();
                            default:
                        }
                    }
                }
            }
            // Fall back to the standard JVM proxy system properties (https.* preferred over http.*).
            String host = null;
            int port = 0;
            if (System.getProperty(HTTPS_PROXY_HOST) != null && System.getProperty(HTTPS_PROXY_PORT) != null) {
                host = System.getProperty(HTTPS_PROXY_HOST);
                port = Integer.parseInt(System.getProperty(HTTPS_PROXY_PORT));
            } else if (System.getProperty(HTTP_PROXY_HOST) != null && System.getProperty(HTTP_PROXY_PORT) != null) {
                host = System.getProperty(HTTP_PROXY_HOST);
                port = Integer.parseInt(System.getProperty(HTTP_PROXY_PORT));
            }
            if (host != null) {
                clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port)));
            }
        } catch (URISyntaxException ignored) {
            // Malformed endpoint URI: build the client without a proxy.
        }
    }
    return clientBuilder.build();
}

@Override
protected void afterTest() {
    // Skipped-in-playback tests created no resources, so there is nothing to clean up.
    if (!isSkipInPlayback) {
        cleanUpResources();
    }
}

/**
 * Sets sdk context when running the tests.
 * Walks the given manager objects reflectively: superclass "resourceManager" fields and declared
 * "internalContext" fields are rewired to {@code internalContext}; any field whose name contains
 * "Manager" is recursed into.
 *
 * @param internalContext the internal runtime context
 * @param objects the manager classes to change internal context
 * @param <T> the type of internal context
 * @throws RuntimeException when field cannot be found or set.
 */
protected <T> void setInternalContext(T internalContext, Object... objects) {
    try {
        for (Object obj : objects) {
            for (final Field field : obj.getClass().getSuperclass().getDeclaredFields()) {
                if (field.getName().equals("resourceManager")) {
                    setAccessible(field);
                    Field context = field.get(obj).getClass().getDeclaredField("internalContext");
                    setAccessible(context);
                    context.set(field.get(obj), internalContext);
                }
            }
            for (Field field : obj.getClass().getDeclaredFields()) {
                if (field.getName().equals("internalContext")) {
                    setAccessible(field);
                    field.set(obj, internalContext);
                } else if (field.getName().contains("Manager")) {
                    // Recurse into nested manager fields so the whole manager graph shares one context.
                    setAccessible(field);
                    setInternalContext(internalContext, field.get(obj));
                }
            }
        }
    } catch (IllegalAccessException | NoSuchFieldException ex) {
        throw LOGGER.logExceptionAsError(new RuntimeException(ex));
    }
}

// Wraps setAccessible(true); the Runnable indirection preserves the original call shape.
private void setAccessible(final AccessibleObject accessibleObject) {
    Runnable runnable = () -> accessibleObject.setAccessible(true);
    runnable.run();
}

/**
 * Builds the manager with provided http pipeline and profile in general manner.
 * Locates the (typically non-public) {@code (HttpPipeline, AzureProfile)} constructor reflectively.
 *
 * @param manager the class of the manager
 * @param httpPipeline the http pipeline
 * @param profile the azure profile
 * @param <T> the type of the manager
 * @return the manager instance
 * @throws RuntimeException when field cannot be found or set.
 */
protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) {
    try {
        Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass());
        setAccessible(constructor);
        return constructor.newInstance(httpPipeline, profile);
    } catch (ReflectiveOperationException ex) {
        throw LOGGER.logExceptionAsError(new RuntimeException(ex));
    }
}

/**
 * Builds an HttpPipeline.
 *
 * @param credential The credentials to use in the pipeline.
 * @param profile The AzureProfile to use in the pipeline.
 * @param httpLogOptions The HTTP logging options to use in the pipeline.
 * @param policies Additional policies to use in the pipeline.
 * @param httpClient The HttpClient to use in the pipeline.
 * @return A new constructed HttpPipeline.
 */
protected abstract HttpPipeline buildHttpPipeline(
    TokenCredential credential,
    AzureProfile profile,
    HttpLogOptions httpLogOptions,
    List<HttpPipelinePolicy> policies,
    HttpClient httpClient);

/**
 * Initializes service clients used in testing.
 *
 * @param httpPipeline The HttpPipeline to use in the clients.
 * @param profile The AzureProfile to use in the clients.
 */
protected abstract void initializeClients(HttpPipeline httpPipeline, AzureProfile profile);

/**
 * Cleans up resources.
 */
protected abstract void cleanUpResources();

// Registers the default resource-manager sanitizers (subscription IDs, credentials, keys,
// connection strings) plus any subclass-supplied sanitizers with the test proxy.
private void addSanitizers() {
    List<TestProxySanitizer> sanitizers = new ArrayList<>(Arrays.asList(
        new TestProxySanitizer(SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.URL),
        new TestProxySanitizer("(?<=%2Fsubscriptions%2F)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
        new TestProxySanitizer("Retry-After", null, "0", TestProxySanitizerType.HEADER),
        new TestProxySanitizer("$..id", SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..secretText", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..keys[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..adminPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..Password", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..accessSAS", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$.properties.osProfile.customData", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..administratorLoginPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..hubDatabasePassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..aliasPrimaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..aliasSecondaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..primaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..secondaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..primaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..secondaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..primaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..secondaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..passwords[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..secret", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$.properties.siteConfig.machineKey.decryptionKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("(?:AccountKey=)(?<accountKey>.*?)(?:;)", REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("accountKey"),
        new TestProxySanitizer("$.properties.WEBSITE_AUTH_ENCRYPTION_KEY", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$.properties.DOCKER_REGISTRY_SERVER_PASSWORD", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY)
    ));
    sanitizers.addAll(this.sanitizers);
    interceptorManager.addSanitizers(sanitizers);
}

// Opts out of these centrally-registered test-proxy sanitizers by ID.
private void removeSanitizers() {
    interceptorManager.removeSanitizers("AZSDK2003", "AZSDK2030", "AZSDK3430", "AZSDK3493");
}

/**
 * Adds test proxy sanitizers.
 * <p>
 * Recommend to call this API in subclass constructor.
 *
 * @param sanitizers the test proxy sanitizers.
 */
protected void addSanitizers(TestProxySanitizer... sanitizers) {
    this.sanitizers.addAll(Arrays.asList(sanitizers));
}

/**
 * JUnit 5 interceptor enforcing a preemptive upper-bound timeout on each test method,
 * but only when running in playback mode; record/live invocations proceed unbounded.
 */
private final class PlaybackTimeoutInterceptor implements InvocationInterceptor {

    private final Duration duration;

    private PlaybackTimeoutInterceptor(Supplier<Duration> timeoutSupplier) {
        Objects.requireNonNull(timeoutSupplier);
        this.duration = timeoutSupplier.get();
    }

    @Override
    public void interceptTestMethod(Invocation<Void> invocation, ReflectiveInvocationContext<Method> invocationContext, ExtensionContext extensionContext) throws Throwable {
        if (isPlaybackMode()) {
            // assertTimeoutPreemptively aborts the test body once the duration elapses.
            Assertions.assertTimeoutPreemptively(duration, invocation::proceed);
        } else {
            invocation.proceed();
        }
    }
}
}
Why use a `StringBuilder` here? Nothing is appended more than once, so this can be done with a plain `String`.
/**
 * Returns the collection path prefix of the given resource full name, i.e. everything up to
 * (but excluding) the fourth '/' of the slash-trimmed name (e.g. "dbs/db1/colls/c1" from
 * "dbs/db1/colls/c1/docs/d1").
 *
 * @param resourceFullName the resource's full name; may be {@code null}.
 * @return the collection path, the trimmed name when it has fewer than four segments, or the
 * original input when it is {@code null} or trims to an empty string.
 */
public static String getCollectionPath(String resourceFullName) {
    if (resourceFullName == null) {
        return null;
    }
    // A StringBuilder is unnecessary here: the trimmed name is produced in one call and
    // never appended to incrementally.
    String trimmed = Utils.trimBeginningAndEndingSlashes(resourceFullName);
    int index = indexOfNth(trimmed, '/', 4);
    if (index > 0) {
        return trimmed.substring(0, index);
    }
    // Preserve original behavior: an input that trims to "" (e.g. "/") returns the untrimmed input.
    return trimmed.isEmpty() ? resourceFullName : trimmed;
}
StringBuilder trimmedResourceFullName = new StringBuilder();
/**
 * Returns the collection path prefix of the given resource full name, i.e. everything up to
 * (but excluding) the fourth '/' of the slash-trimmed name (e.g. "dbs/db1/colls/c1" from
 * "dbs/db1/colls/c1/docs/d1").
 *
 * @param resourceFullName the resource's full name; may be {@code null}.
 * @return the collection path, the trimmed name when it has fewer than four segments, or
 * {@code null} when the input is {@code null}.
 */
public static String getCollectionPath(String resourceFullName) {
    if (resourceFullName != null) {
        String trimmedResourceFullName = Utils.trimBeginningAndEndingSlashes(resourceFullName);
        // Fixed: the redundant trimmedResourceFullName.toString() (a leftover from the earlier
        // StringBuilder-based implementation) is removed — the variable is already a String.
        int index = indexOfNth(trimmedResourceFullName, '/', 4);
        if (index > 0) {
            return trimmedResourceFullName.substring(0, index);
        } else {
            return trimmedResourceFullName;
        }
    }
    return resourceFullName;
}
/**
 * Static helpers for building and parsing Cosmos DB resource paths such as
 * "dbs/{db}/colls/{coll}/docs/{doc}". Paths are either name-based (segments
 * are resource names) or RID-based (segments are ResourceId strings); most
 * methods come in pairs handling one of the two forms.
 */
class PathsHelper {
    private final static Logger logger = LoggerFactory.getLogger(PathsHelper.class);

    /**
     * Generates the path for a request, dispatching on whether the request
     * addresses the resource by name or by resource id.
     */
    public static String generatePath(ResourceType resourceType, RxDocumentServiceRequest request, boolean isFeed) {
        if (request.getIsNameBased()) {
            return generatePathForNameBased(resourceType, request.getResourceAddress(), isFeed, request.getOperationType());
        } else {
            return generatePath(resourceType, request.getResourceId(), isFeed, request.getOperationType());
        }
    }

    /**
     * Builds a name-based path for a concrete Resource instance by appending
     * the type-specific segment and the resource name to the owner's full name.
     * Returns null for a null resourceName, for a null owner (except Database),
     * or for an unrecognized Resource subtype.
     */
    public static String generatePathForNameBased(Resource resourceType, String resourceOwnerFullName, String resourceName) {
        if (resourceName == null)
            return null;
        if (resourceType instanceof Database) {
            return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceOwnerFullName == null) {
            return null;
        } else if (resourceType instanceof DocumentCollection) {
            return resourceOwnerFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof StoredProcedure) {
            return resourceOwnerFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof UserDefinedFunction) {
            return resourceOwnerFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Trigger) {
            return resourceOwnerFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Conflict) {
            return resourceOwnerFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof User) {
            return resourceOwnerFullName + "/" + Paths.USERS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Permission) {
            return resourceOwnerFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Document) {
            return resourceOwnerFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Offer) {
            // Offers are top-level: the owner's full name is not used here.
            return Paths.OFFERS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Resource) {
            return null;
        }
        String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString());
        assert false : errorMessage;
        throw new IllegalArgumentException(errorMessage);
    }

    /**
     * Builds a name-based path from a resource full name. For feed requests
     * the type-specific feed segment is appended; for non-feed requests the
     * full name is returned as-is (except the PartitionKey-delete special case,
     * which appends the operations/partitionkeydelete segments).
     */
    private static String generatePathForNameBased(ResourceType resourceType, String resourceFullName, boolean isFeed, OperationType operationType) {
        if (isFeed && Strings.isNullOrEmpty(resourceFullName) && resourceType != ResourceType.Database) {
            String errorMessage = String.format(RMResources.UnexpectedResourceType, resourceType);
            throw new IllegalArgumentException(errorMessage);
        }
        String resourcePath = null;
        if (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
            resourcePath = resourceFullName + "/" + Paths.OPERATIONS_PATH_SEGMENT + "/" + Paths.PARTITION_KEY_DELETE_PATH_SEGMENT;
        } else if (!isFeed) {
            resourcePath = resourceFullName;
        } else if (resourceType == ResourceType.Database) {
            return Paths.DATABASES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.DocumentCollection) {
            resourcePath = resourceFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.StoredProcedure) {
            resourcePath = resourceFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.UserDefinedFunction) {
            resourcePath = resourceFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Trigger) {
            resourcePath = resourceFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Conflict) {
            resourcePath = resourceFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Attachment) {
            resourcePath = resourceFullName + "/" + Paths.ATTACHMENTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.User) {
            resourcePath = resourceFullName + "/" + Paths.USERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Permission) {
            resourcePath = resourceFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Document) {
            resourcePath = resourceFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Offer) {
            return resourceFullName + "/" + Paths.OFFERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.PartitionKeyRange) {
            return resourceFullName + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Schema) {
            resourcePath = resourceFullName + "/" + Paths.SCHEMAS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.ClientEncryptionKey) {
            resourcePath = resourceFullName + "/" + Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT;
        } else {
            String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString());
            assert false : errorMessage;
            throw new IllegalArgumentException(errorMessage);
        }
        return resourcePath;
    }

    /**
     * Convenience overload: PartitionKey resources are always routed through
     * the Delete operation path; every other type uses a null operation type.
     */
    public static String generatePath(ResourceType resourceType, String ownerOrResourceId, boolean isFeed) {
        if (resourceType == ResourceType.PartitionKey) {
            return generatePath(resourceType, ownerOrResourceId, isFeed, OperationType.Delete);
        } else {
            return generatePath(resourceType, ownerOrResourceId, isFeed, null);
        }
    }

    /**
     * Builds a RID-based path from a ResourceId string. Each branch parses the
     * RID and reconstructs the full "dbs/{rid}/..." hierarchy; feed variants
     * stop at the containing collection segment. Throws IllegalStateException
     * for feed requests missing an id (outside the top-level types) and for
     * unrecognized type combinations.
     */
    private static String generatePath(ResourceType resourceType, String ownerOrResourceId, boolean isFeed, OperationType operationType) {
        if (isFeed && (ownerOrResourceId == null || ownerOrResourceId.isEmpty()) && resourceType != ResourceType.Database && resourceType != ResourceType.Offer && resourceType != ResourceType.MasterPartition && resourceType != ResourceType.ServerPartition && resourceType != ResourceType.DatabaseAccount && resourceType != ResourceType.Topology) {
            throw new IllegalStateException("INVALID resource type");
        }
        if (ownerOrResourceId == null) {
            ownerOrResourceId = StringUtils.EMPTY;
        }
        if (isFeed && resourceType == ResourceType.Database) {
            return Paths.DATABASES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Database) {
            return Paths.DATABASES_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (isFeed && resourceType == ResourceType.DocumentCollection) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.DocumentCollection) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString();
        } else if (isFeed && resourceType == ResourceType.Offer) {
            return Paths.OFFERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Offer) {
            return Paths.OFFERS_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (isFeed && resourceType == ResourceType.StoredProcedure) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.StoredProcedure) {
            ResourceId storedProcedureId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + storedProcedureId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + storedProcedureId.getDocumentCollectionId().toString() + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + storedProcedureId.getStoredProcedureId().toString();
        } else if (isFeed && resourceType == ResourceType.UserDefinedFunction) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.UserDefinedFunction) {
            ResourceId functionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + functionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + functionId.getDocumentCollectionId().toString() + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + functionId.getUserDefinedFunctionId().toString();
        } else if (isFeed && resourceType == ResourceType.Trigger) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.TRIGGERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Trigger) {
            ResourceId triggerId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + triggerId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + triggerId.getDocumentCollectionId().toString() + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + triggerId.getTriggerId().toString();
        } else if (isFeed && resourceType == ResourceType.Conflict) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.CONFLICTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Conflict) {
            ResourceId conflictId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + conflictId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + conflictId.getDocumentCollectionId().toString() + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + conflictId.getConflictId().toString();
        } else if (isFeed && resourceType == ResourceType.PartitionKeyRange) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.PartitionKeyRange) {
            ResourceId partitionKeyRangeId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + partitionKeyRangeId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + partitionKeyRangeId.getDocumentCollectionId().toString() + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT + "/" + partitionKeyRangeId.getPartitionKeyRangeId().toString();
        } else if (isFeed && resourceType == ResourceType.Attachment) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentId().toString() + "/" + Paths.ATTACHMENTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Attachment) {
            ResourceId attachmentId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + attachmentId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + attachmentId.getDocumentCollectionId().toString() + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + attachmentId.getDocumentId().toString() + "/" + Paths.ATTACHMENTS_PATH_SEGMENT + "/" + attachmentId.getAttachmentId().toString();
        } else if (isFeed && resourceType == ResourceType.User) {
            // NOTE(review): unlike the other feed branches, this one uses the raw
            // ownerOrResourceId as the database segment instead of parsing a RID.
            return Paths.DATABASES_PATH_SEGMENT + "/" + ownerOrResourceId + "/" + Paths.USERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.User) {
            ResourceId userId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + userId.getDatabaseId().toString() + "/" + Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString();
        } else if (isFeed && resourceType == ResourceType.Permission) {
            ResourceId userId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + userId.getDatabaseId().toString() + "/" + Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString() + "/" + Paths.PERMISSIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Permission) {
            ResourceId permissionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + permissionId.getDatabaseId().toString() + "/" + Paths.USERS_PATH_SEGMENT + "/" + permissionId.getUserId().toString() + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + permissionId.getPermissionId().toString();
        } else if (isFeed && resourceType == ResourceType.Document) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.DOCUMENTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Document) {
            ResourceId documentId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentId.getDocumentCollectionId().toString() + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentId.getDocumentId().toString();
        } else if (isFeed && resourceType == ResourceType.MasterPartition) {
            return Paths.PARTITIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.MasterPartition) {
            return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (isFeed && resourceType == ResourceType.ServerPartition) {
            return Paths.PARTITIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.ServerPartition) {
            return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (isFeed && resourceType == ResourceType.Topology) {
            return Paths.TOPOLOGY_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Topology) {
            return Paths.TOPOLOGY_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (isFeed && resourceType == ResourceType.DatabaseAccount) {
            return Paths.DATABASE_ACCOUNT_PATH_SEGMENT;
        } else if (resourceType == ResourceType.DatabaseAccount) {
            return Paths.DATABASE_ACCOUNT_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (resourceType == ResourceType.ClientEncryptionKey) {
            ResourceId clientEncryptionKeyId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + clientEncryptionKeyId.getDatabaseId().toString() + "/" + Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT + "/" + clientEncryptionKeyId.getClientEncryptionKeyId().toString();
        } else if (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.OPERATIONS_PATH_SEGMENT + "/" + Paths.PARTITION_KEY_DELETE_PATH_SEGMENT;
        }
        String errorMessage = "invalid resource type";
        throw new IllegalStateException(errorMessage);
    }

    /**
     * Parses a resource URL into a PathInfo, or null when the URL cannot be
     * classified. Name-based URLs (second segment is not a database RID) are
     * delegated to parseNameSegments; otherwise an odd segment count ending in
     * a resource-type keyword is a feed, an even count a single resource.
     */
    public static PathInfo parsePathSegments(String resourceUrl) {
        String[] segments = StringUtils.strip(resourceUrl, "/").split("/");
        if (segments == null || segments.length < 1) {
            return null;
        }
        int uriSegmentsCount = segments.length;
        String segmentOne = StringUtils.strip(segments[uriSegmentsCount - 1], "/");
        String segmentTwo = (uriSegmentsCount >= 2) ? StringUtils.strip(segments[uriSegmentsCount - 2], "/") : StringUtils.EMPTY;
        if (uriSegmentsCount >= 2) {
            // Only URLs that are not media/offers/partitions/account paths can be
            // name-based; those namespaces never carry user-chosen names here.
            if (Paths.MEDIA_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.OFFERS_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.PARTITIONS_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.DATABASE_ACCOUNT_PATH_SEGMENT.compareTo(segments[0]) != 0) {
                Pair<Boolean, ResourceId> result = ResourceId.tryParse(segments[1]);
                if (!result.getLeft() || !result.getRight().isDatabaseId()) {
                    return parseNameSegments(resourceUrl, segments);
                }
            }
        }
        if ((uriSegmentsCount % 2 != 0) && isResourceType(segmentOne)) {
            // Feed request: for "dbs" there is no owner id to report.
            return new PathInfo(true, segmentOne, segmentOne.compareToIgnoreCase(Paths.DATABASES_PATH_SEGMENT) != 0 ? segmentTwo : StringUtils.EMPTY, false);
        } else if (isResourceType(segmentTwo)) {
            return new PathInfo(false, segmentTwo, segmentOne, false);
        }
        return null;
    }

    /**
     * Parses a resource URL, filling the supplied PathInfo in place.
     *
     * @param resourceUrl   the complete resource link.
     * @param pathInfo      output object populated on success; reset on entry.
     * @param clientVersion the client version (only consulted for media paths).
     * @return true when the URL was successfully parsed, false otherwise.
     */
    public static boolean tryParsePathSegments(String resourceUrl, PathInfo pathInfo, String clientVersion) {
        pathInfo.resourcePath = StringUtils.EMPTY;
        pathInfo.resourceIdOrFullName = StringUtils.EMPTY;
        pathInfo.isFeed = false;
        pathInfo.isNameBased = false;
        if (StringUtils.isEmpty(resourceUrl)) {
            return false;
        }
        String trimmedStr = StringUtils.strip(resourceUrl, Constants.Properties.PATH_SEPARATOR);
        String[] segments = StringUtils.split(trimmedStr, Constants.Properties.PATH_SEPARATOR);
        if (segments == null || segments.length < 1) {
            return false;
        }
        int uriSegmentsCount = segments.length;
        String segmentOne = segments[uriSegmentsCount - 1];
        String segmentTwo = (uriSegmentsCount >= 2) ? segments[uriSegmentsCount - 2] : StringUtils.EMPTY;
        if (uriSegmentsCount >= 2) {
            if (Paths.MEDIA_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.OFFERS_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.PARTITIONS_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.DATABASE_ACCOUNT_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.TOPOLOGY_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.RID_RANGE_PATH_SEGMENT.compareTo(segments[0]) != 0) {
                Pair<Boolean, ResourceId> result = ResourceId.tryParse(segments[1]);
                if (!result.getLeft() || !result.getRight().isDatabaseId()) {
                    pathInfo.isNameBased = true;
                    return tryParseNameSegments(resourceUrl, segments, pathInfo);
                }
            }
        }
        if ((uriSegmentsCount % 2 != 0) && PathsHelper.isResourceType(segmentOne)) {
            pathInfo.isFeed = true;
            pathInfo.resourcePath = segmentOne;
            if (!segmentOne.equalsIgnoreCase(Paths.DATABASES_PATH_SEGMENT)) {
                pathInfo.resourceIdOrFullName = segmentTwo;
            }
        } else if (PathsHelper.isResourceType(segmentTwo)) {
            pathInfo.isFeed = false;
            pathInfo.resourcePath = segmentTwo;
            pathInfo.resourceIdOrFullName = segmentOne;
            // NOTE(review): this if-body is empty — the media/client-version
            // handling appears to have been removed or never ported; confirm
            // whether the whole condition can be deleted.
            if (!StringUtils.isEmpty(clientVersion) && pathInfo.resourcePath.equalsIgnoreCase(Paths.MEDIA_PATH_SEGMENT)) {
            }
        } else {
            return false;
        }
        return true;
    }

    /**
     * Parses the name segments of a name-based URL, filling the supplied
     * PathInfo in place. An even segment count denotes a single resource
     * (".../{type}/{name}"); an odd count denotes a feed (".../{type}").
     *
     * @return true when the URL was recognized, false otherwise.
     */
    private static boolean tryParseNameSegments(String resourceUrl, String[] segments, PathInfo pathInfo) {
        pathInfo.isFeed = false;
        pathInfo.resourceIdOrFullName = "";
        pathInfo.resourcePath = "";
        if (segments == null || segments.length < 1) {
            return false;
        }
        if (segments.length % 2 == 0) {
            // Even count => single resource addressed by its full name.
            if (isResourceType(segments[segments.length - 2])) {
                pathInfo.resourcePath = segments[segments.length - 2];
                pathInfo.resourceIdOrFullName = unescapeJavaAndTrim(resourceUrl);
                return true;
            }
        } else {
            // Odd count => feed; the owner is everything before the last segment.
            if (isResourceType(segments[segments.length - 1])) {
                pathInfo.isFeed = true;
                pathInfo.resourcePath = segments[segments.length - 1];
                String resourceIdOrFullName = resourceUrl.substring(0, StringUtils.removeEnd(resourceUrl,Paths.ROOT).lastIndexOf(Paths.ROOT));
                pathInfo.resourceIdOrFullName = unescapeJavaAndTrim(resourceIdOrFullName);
                return true;
            }
        }
        return false;
    }

    /**
     * PathInfo-returning analogue of tryParseNameSegments; returns null when
     * the segments do not match a known name-based layout.
     */
    public static PathInfo parseNameSegments(String resourceUrl, String[] segments) {
        if (segments == null || segments.length < 1) {
            return null;
        }
        if (segments.length % 2 == 0) {
            if (isResourceType(segments[segments.length - 2])) {
                return new PathInfo(false, segments[segments.length - 2], unescapeJavaAndTrim(resourceUrl), true);
            }
        } else {
            if (isResourceType(segments[segments.length - 1])) {
                return new PathInfo(true, segments[segments.length - 1], unescapeJavaAndTrim(resourceUrl.substring(0, StringUtils.removeEnd(resourceUrl, Paths.ROOT).lastIndexOf(Paths.ROOT))), true);
            }
        }
        return null;
    }

    /**
     * Strips leading/trailing path separators and, only when the remaining
     * text contains an escape character, applies Java unescaping. The manual
     * index scans avoid allocating when no trimming or unescaping is needed.
     */
    public static String unescapeJavaAndTrim(String resourceUrl) {
        if (resourceUrl == null) {
            return null;
        }
        // Skip leading separators.
        int startInclusiveIndex = 0;
        while (startInclusiveIndex < resourceUrl.length() && resourceUrl.charAt(startInclusiveIndex) == Paths.ROOT_CHAR) {
            startInclusiveIndex++;
        }
        if (startInclusiveIndex == resourceUrl.length()) {
            return "";
        }
        // Skip trailing separators.
        int endExclusiveIndex = resourceUrl.length();
        while (endExclusiveIndex > startInclusiveIndex && resourceUrl.charAt(endExclusiveIndex - 1) == Paths.ROOT_CHAR) {
            endExclusiveIndex--;
        }
        // Only pay for unescaping when an escape character is actually present.
        for (int startLoopIndex = startInclusiveIndex; startLoopIndex < endExclusiveIndex; startLoopIndex++) {
            if (resourceUrl.charAt(startLoopIndex)== Paths.ESCAPE_CHAR) {
                return StringEscapeUtils.unescapeJava(StringUtils.strip(resourceUrl, Paths.ROOT));
            }
        }
        if (startInclusiveIndex == 0 && endExclusiveIndex == resourceUrl.length()) {
            return resourceUrl;
        }
        return resourceUrl.substring(startInclusiveIndex, endExclusiveIndex);
    }

    /** Returns true when the segment is one of the known resource-type keywords. */
    private static boolean isResourceType(String resourcePathSegment) {
        if (StringUtils.isEmpty(resourcePathSegment)) {
            return false;
        }
        switch (resourcePathSegment.toLowerCase(Locale.ROOT)) {
            case Paths.ATTACHMENTS_PATH_SEGMENT:
            case Paths.COLLECTIONS_PATH_SEGMENT:
            case Paths.DATABASES_PATH_SEGMENT:
            case Paths.PERMISSIONS_PATH_SEGMENT:
            case Paths.USERS_PATH_SEGMENT:
            case Paths.DOCUMENTS_PATH_SEGMENT:
            case Paths.STORED_PROCEDURES_PATH_SEGMENT:
            case Paths.TRIGGERS_PATH_SEGMENT:
            case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT:
            case Paths.CONFLICTS_PATH_SEGMENT:
            case Paths.MEDIA_PATH_SEGMENT:
            case Paths.OFFERS_PATH_SEGMENT:
            case Paths.PARTITIONS_PATH_SEGMENT:
            case Paths.DATABASE_ACCOUNT_PATH_SEGMENT:
            case Paths.TOPOLOGY_PATH_SEGMENT:
            case Paths.PARTITION_KEY_RANGES_PATH_SEGMENT:
            case Paths.SCHEMAS_PATH_SEGMENT:
            case Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT:
                return true;
            default:
                return false;
        }
    }

    /**
     * Switch-based variant of generatePathForNameBased keyed on ResourceType.
     * Returns null for types without a name-based layout. Note the Offer case
     * here prefixes the owner's full name, unlike the Resource-instance
     * overload above which treats offers as top-level.
     */
    public static String generatePathForNameBased(ResourceType resourceType, String resourceOwnerFullName, String resourceName) {
        switch (resourceType) {
            case Database:
                return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName;
            case DocumentCollection:
                return resourceOwnerFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + resourceName;
            case StoredProcedure:
                return resourceOwnerFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + resourceName;
            case UserDefinedFunction:
                return resourceOwnerFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + resourceName;
            case Trigger:
                return resourceOwnerFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + resourceName;
            case Attachment:
                return resourceOwnerFullName + "/" + Paths.ATTACHMENTS_PATH_SEGMENT + "/" + resourceName;
            case Conflict:
                return resourceOwnerFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + resourceName;
            case Document:
                return resourceOwnerFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + resourceName;
            case Offer:
                return resourceOwnerFullName + "/" + Paths.OFFERS_PATH_SEGMENT + "/" + resourceName;
            case Permission:
                return resourceOwnerFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + resourceName;
            case User:
                return resourceOwnerFullName + "/" + Paths.USERS_PATH_SEGMENT + "/" + resourceName;
            case PartitionKeyRange:
                return resourceOwnerFullName + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT + "/" + resourceName;
            default:
                return null;
        }
    }

    /**
     * Returns the "dbs/{db}" prefix of a full name (text up to the 2nd '/'),
     * or the input unchanged when no such prefix exists (including null).
     */
    public static String getDatabasePath(String resourceFullName) {
        if (resourceFullName != null) {
            int index = indexOfNth(resourceFullName, '/', 2);
            if (index > 0)
                return resourceFullName.substring(0, index);
        }
        return resourceFullName;
    }

    /**
     * Returns the ancestor path up to the segmentIndex-th '/'. When the name
     * has exactly segmentIndex-1 separators the full name itself is returned;
     * when it has fewer, null.
     */
    public static String getParentByIndex(String resourceFullName, int segmentIndex) {
        int index = indexOfNth(resourceFullName, '/', segmentIndex);
        if (index > 0)
            return resourceFullName.substring(0, index);
        else {
            index = indexOfNth(resourceFullName, '/', segmentIndex - 1);
            if (index > 0)
                return resourceFullName;
            else
                return null;
        }
    }

    /**
     * Heuristic: a name-based address has a '/' at index 3 (e.g. "dbs/...");
     * a RID-based address does not.
     */
    public static boolean isNameBased(String resourceIdOrFullName) {
        if (resourceIdOrFullName != null && !resourceIdOrFullName.isEmpty() && resourceIdOrFullName.length() > 4 && resourceIdOrFullName.charAt(3) == '/') {
            return true;
        }
        return false;
    }

    /**
     * Index of the nth occurrence of {@code value} in {@code str}, or -1 when
     * there are fewer than n occurrences.
     */
    private static int indexOfNth(String str, char value, int nthOccurance) {
        int remaining = nthOccurance;
        char[] characters = str.toCharArray();
        for (int i = 0; i < characters.length; i++) {
            if (characters[i] == value) {
                remaining--;
                if (remaining == 0) {
                    return i;
                }
            }
        }
        return -1;
    }

    /**
     * Maps a path segment keyword (case-sensitive, unlike isResourceType) to
     * its ResourceType.
     *
     * @throws BadRequestException for an empty or unknown segment.
     */
    public static ResourceType getResourcePathSegment(String resourcePathSegment) throws BadRequestException {
        if (StringUtils.isEmpty(resourcePathSegment)) {
            String message = String.format(RMResources.StringArgumentNullOrEmpty, "resourcePathSegment");
            throw new BadRequestException(message);
        }
        switch (resourcePathSegment) {
            case Paths.ATTACHMENTS_PATH_SEGMENT:
                return ResourceType.Attachment;
            case Paths.COLLECTIONS_PATH_SEGMENT:
                return ResourceType.DocumentCollection;
            case Paths.DATABASES_PATH_SEGMENT:
                return ResourceType.Database;
            case Paths.PERMISSIONS_PATH_SEGMENT:
                return ResourceType.Permission;
            case Paths.USERS_PATH_SEGMENT:
                return ResourceType.User;
            case Paths.DOCUMENTS_PATH_SEGMENT:
                return ResourceType.Document;
            case Paths.STORED_PROCEDURES_PATH_SEGMENT:
                return ResourceType.StoredProcedure;
            case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT:
                return ResourceType.UserDefinedFunction;
            case Paths.TRIGGERS_PATH_SEGMENT:
                return ResourceType.Trigger;
            case Paths.CONFLICTS_PATH_SEGMENT:
                return ResourceType.Conflict;
            case Paths.OFFERS_PATH_SEGMENT:
                return ResourceType.Offer;
            case Paths.SCHEMAS_PATH_SEGMENT:
                return ResourceType.Schema;
        }
        String errorMessage = String.format(RMResources.UnknownResourceType, resourcePathSegment);
        throw new BadRequestException(errorMessage);
    }

    /**
     * Inverse of getResourcePathSegment: path segment keyword for a type.
     *
     * @throws BadRequestException for types with no path segment.
     */
    public static String getResourcePath(ResourceType resourceType) throws BadRequestException {
        switch (resourceType) {
            case Database:
                return Paths.DATABASES_PATH_SEGMENT;
            case DocumentCollection:
                return Paths.COLLECTIONS_PATH_SEGMENT;
            case Document:
                return Paths.DOCUMENTS_PATH_SEGMENT;
            case StoredProcedure:
                return Paths.STORED_PROCEDURES_PATH_SEGMENT;
            case UserDefinedFunction:
                return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
            case Trigger:
                return Paths.TRIGGERS_PATH_SEGMENT;
            case Conflict:
                return Paths.CONFLICTS_PATH_SEGMENT;
            case Attachment:
                return Paths.ATTACHMENTS_PATH_SEGMENT;
            case User:
                return Paths.USERS_PATH_SEGMENT;
            case Permission:
                return Paths.PERMISSIONS_PATH_SEGMENT;
            case Offer:
                return Paths.OFFERS_PATH_SEGMENT;
            case MasterPartition:
            case ServerPartition:
                return Paths.PARTITIONS_PATH_SEGMENT;
            case PartitionKeyRange:
                return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
            case Media:
                return Paths.MEDIA_ROOT;
            case Schema:
                return Paths.SCHEMAS_PATH_SEGMENT;
            case DatabaseAccount:
            case Topology:
                return Paths.ROOT;
            default:
                String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString());
                throw new BadRequestException(errorMessage);
        }
    }

    /**
     * Validates that a full name has the expected alternating
     * {type-segment}/{name} layout for the given resource type.
     */
    public static boolean validateResourceFullName(ResourceType resourceType, String resourceFullName) {
        String[] segments = StringUtils.split(resourceFullName, '/');
        String[] resourcePathArray = getResourcePathArray(resourceType);
        if (resourcePathArray == null) {
            return false;
        }
        // Each expected type segment is followed by one name segment.
        if (segments.length != resourcePathArray.length * 2) {
            return false;
        }
        for (int i = 0; i < resourcePathArray.length; i++) {
            if (resourcePathArray[i].compareTo(segments[2 * i]) != 0) {
                return false;
            }
        }
        return true;
    }

    /**
     * Expected sequence of type segments (e.g. [dbs, colls, docs]) for a
     * resource type, or null for types without a name-based layout.
     */
    private static String[] getResourcePathArray(ResourceType resourceType) {
        List<String> segments = new ArrayList<String>();
        segments.add(Paths.DATABASES_PATH_SEGMENT);
        if (resourceType == ResourceType.Permission || resourceType == ResourceType.User) {
            segments.add(Paths.USERS_PATH_SEGMENT);
            if (resourceType == ResourceType.Permission) {
                segments.add(Paths.PERMISSIONS_PATH_SEGMENT);
            }
        } else if (resourceType == ResourceType.ClientEncryptionKey) {
            segments.add(Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
        } else if (resourceType == ResourceType.DocumentCollection || resourceType == ResourceType.StoredProcedure || resourceType == ResourceType.UserDefinedFunction || resourceType == ResourceType.Trigger || resourceType == ResourceType.Conflict || resourceType == ResourceType.Attachment || resourceType == ResourceType.Document || resourceType == ResourceType.PartitionKeyRange || resourceType == ResourceType.Schema) {
            segments.add(Paths.COLLECTIONS_PATH_SEGMENT);
            if (resourceType == ResourceType.StoredProcedure) {
                segments.add(Paths.STORED_PROCEDURES_PATH_SEGMENT);
            } else if (resourceType == ResourceType.UserDefinedFunction) {
                segments.add(Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
            } else if (resourceType == ResourceType.Trigger) {
                segments.add(Paths.TRIGGERS_PATH_SEGMENT);
            } else if (resourceType == ResourceType.Conflict) {
                segments.add(Paths.CONFLICTS_PATH_SEGMENT);
            } else if (resourceType == ResourceType.Schema) {
                segments.add(Paths.SCHEMAS_PATH_SEGMENT);
            } else if (resourceType == ResourceType.Document || resourceType == ResourceType.Attachment) {
                segments.add(Paths.DOCUMENTS_PATH_SEGMENT);
                if (resourceType == ResourceType.Attachment) {
                    segments.add(Paths.ATTACHMENTS_PATH_SEGMENT);
                }
            } else if (resourceType == ResourceType.PartitionKeyRange) {
                segments.add(Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
            } else if (resourceType == ResourceType.PartitionKey) {
                // NOTE(review): this branch looks unreachable — PartitionKey is not
                // in the enclosing else-if condition above; confirm and remove.
                segments.add(Paths.COLLECTIONS_PATH_SEGMENT);
                segments.add(Paths.OPERATIONS_PATH_SEGMENT);
            }
        } else if (resourceType != ResourceType.Database) {
            return null;
        }
        return segments.stream().toArray(String[]::new);
    }

    /**
     * Validates a RID string against the expected type, dispatching to the
     * per-type validators below. Unsupported types log an error and return
     * false rather than throwing.
     */
    public static boolean validateResourceId(ResourceType resourceType, String resourceId) {
        if (resourceType == ResourceType.Conflict) {
            return PathsHelper.validateConflictId(resourceId);
        } else if (resourceType == ResourceType.Database) {
            return PathsHelper.validateDatabaseId(resourceId);
        } else if (resourceType == ResourceType.DocumentCollection) {
            return PathsHelper.validateDocumentCollectionId(resourceId);
        } else if (resourceType == ResourceType.Document) {
            return PathsHelper.validateDocumentId(resourceId);
        } else if (resourceType == ResourceType.Permission) {
            return PathsHelper.validatePermissionId(resourceId);
        } else if (resourceType == ResourceType.StoredProcedure) {
            return PathsHelper.validateStoredProcedureId(resourceId);
        } else if (resourceType == ResourceType.Trigger) {
            return PathsHelper.validateTriggerId(resourceId);
        } else if (resourceType == ResourceType.UserDefinedFunction) {
            return PathsHelper.validateUserDefinedFunctionId(resourceId);
        } else if (resourceType == ResourceType.User) {
            return PathsHelper.validateUserId(resourceId);
        } else if (resourceType == ResourceType.Attachment) {
            return PathsHelper.validateAttachmentId(resourceId);
        } else if (resourceType == ResourceType.ClientEncryptionKey) {
            return PathsHelper.validateClientEncryptionKeyId(resourceId);
        } else {
            logger.error(String.format("ValidateResourceId not implemented for Type %s in ResourceRequestHandler", resourceType.toString()));
            return false;
        }
    }

    // Each validator below checks that the string parses as a ResourceId and
    // that the corresponding type-specific component is non-zero.

    public static boolean validateDatabaseId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getDatabase() != 0;
    }

    public static boolean validateDocumentCollectionId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getDocumentCollection() != 0;
    }

    public static boolean validateDocumentId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getDocument() != 0;
    }

    public static boolean validateConflictId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getConflict() != 0;
    }

    public static boolean validateAttachmentId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getAttachment() != 0;
    }

    public static boolean validatePermissionId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getPermission() != 0;
    }

    public static boolean validateStoredProcedureId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getStoredProcedure() != 0;
    }

    public static boolean validateTriggerId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getTrigger() != 0;
    }

    public static boolean validateUserDefinedFunctionId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getUserDefinedFunction() != 0;
    }

    public static boolean validateUserId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getUser() != 0;
    }

    public static boolean validateClientEncryptionKeyId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getClientEncryptionKey() != 0;
    }

    /** Returns true for the Resource subtypes exposed through the public API. */
    public static boolean isPublicResource(Resource resourceType) {
        if (resourceType instanceof Database || resourceType instanceof DocumentCollection || resourceType instanceof StoredProcedure || resourceType instanceof UserDefinedFunction || resourceType instanceof Trigger || resourceType instanceof Conflict || resourceType instanceof User || resourceType instanceof Permission || resourceType instanceof Document || resourceType instanceof Offer ) {
            return true;
        } else {
            return false;
        }
    }
}
class PathsHelper { private final static Logger logger = LoggerFactory.getLogger(PathsHelper.class); public static String generatePath(ResourceType resourceType, RxDocumentServiceRequest request, boolean isFeed) { if (request.getIsNameBased()) { return generatePathForNameBased(resourceType, request.getResourceAddress(), isFeed, request.getOperationType()); } else { return generatePath(resourceType, request.getResourceId(), isFeed, request.getOperationType()); } } public static String generatePathForNameBased(Resource resourceType, String resourceOwnerFullName, String resourceName) { if (resourceName == null) return null; if (resourceType instanceof Database) { return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName; } else if (resourceOwnerFullName == null) { return null; } else if (resourceType instanceof DocumentCollection) { return resourceOwnerFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof StoredProcedure) { return resourceOwnerFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof UserDefinedFunction) { return resourceOwnerFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof Trigger) { return resourceOwnerFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof Conflict) { return resourceOwnerFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof User) { return resourceOwnerFullName + "/" + Paths.USERS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof Permission) { return resourceOwnerFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof Document) { return resourceOwnerFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof Offer) { return Paths.OFFERS_PATH_SEGMENT + "/" + 
resourceName; } else if (resourceType instanceof Resource) { return null; } String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString()); assert false : errorMessage; throw new IllegalArgumentException(errorMessage); } private static String generatePathForNameBased(ResourceType resourceType, String resourceFullName, boolean isFeed, OperationType operationType) { if (isFeed && Strings.isNullOrEmpty(resourceFullName) && resourceType != ResourceType.Database) { String errorMessage = String.format(RMResources.UnexpectedResourceType, resourceType); throw new IllegalArgumentException(errorMessage); } String resourcePath = null; if (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) { resourcePath = resourceFullName + "/" + Paths.OPERATIONS_PATH_SEGMENT + "/" + Paths.PARTITION_KEY_DELETE_PATH_SEGMENT; } else if (!isFeed) { resourcePath = resourceFullName; } else if (resourceType == ResourceType.Database) { return Paths.DATABASES_PATH_SEGMENT; } else if (resourceType == ResourceType.DocumentCollection) { resourcePath = resourceFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.StoredProcedure) { resourcePath = resourceFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT; } else if (resourceType == ResourceType.UserDefinedFunction) { resourcePath = resourceFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.Trigger) { resourcePath = resourceFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT; } else if (resourceType == ResourceType.Conflict) { resourcePath = resourceFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT; } else if (resourceType == ResourceType.Attachment) { resourcePath = resourceFullName + "/" + Paths.ATTACHMENTS_PATH_SEGMENT; } else if (resourceType == ResourceType.User) { resourcePath = resourceFullName + "/" + Paths.USERS_PATH_SEGMENT; } else if (resourceType == ResourceType.Permission) { resourcePath = 
resourceFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.Document) { resourcePath = resourceFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT; } else if (resourceType == ResourceType.Offer) { return resourceFullName + "/" + Paths.OFFERS_PATH_SEGMENT; } else if (resourceType == ResourceType.PartitionKeyRange) { return resourceFullName + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT; } else if (resourceType == ResourceType.Schema) { resourcePath = resourceFullName + "/" + Paths.SCHEMAS_PATH_SEGMENT; } else if (resourceType == ResourceType.ClientEncryptionKey) { resourcePath = resourceFullName + "/" + Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT; } else { String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString()); assert false : errorMessage; throw new IllegalArgumentException(errorMessage); } return resourcePath; } public static String generatePath(ResourceType resourceType, String ownerOrResourceId, boolean isFeed) { if (resourceType == ResourceType.PartitionKey) { return generatePath(resourceType, ownerOrResourceId, isFeed, OperationType.Delete); } else { return generatePath(resourceType, ownerOrResourceId, isFeed, null); } } private static String generatePath(ResourceType resourceType, String ownerOrResourceId, boolean isFeed, OperationType operationType) { if (isFeed && (ownerOrResourceId == null || ownerOrResourceId.isEmpty()) && resourceType != ResourceType.Database && resourceType != ResourceType.Offer && resourceType != ResourceType.MasterPartition && resourceType != ResourceType.ServerPartition && resourceType != ResourceType.DatabaseAccount && resourceType != ResourceType.Topology) { throw new IllegalStateException("INVALID resource type"); } if(ownerOrResourceId == null) { ownerOrResourceId = StringUtils.EMPTY; } if (isFeed && resourceType == ResourceType.Database) { return Paths.DATABASES_PATH_SEGMENT; } else if (resourceType == ResourceType.Database) { return Paths.DATABASES_PATH_SEGMENT 
+ "/" + ownerOrResourceId; } else if (isFeed && resourceType == ResourceType.DocumentCollection) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.DocumentCollection) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString(); } else if (isFeed && resourceType == ResourceType.Offer) { return Paths.OFFERS_PATH_SEGMENT; } else if (resourceType == ResourceType.Offer) { return Paths.OFFERS_PATH_SEGMENT + "/" + ownerOrResourceId; } else if (isFeed && resourceType == ResourceType.StoredProcedure) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT; } else if (resourceType == ResourceType.StoredProcedure) { ResourceId storedProcedureId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + storedProcedureId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + storedProcedureId.getDocumentCollectionId().toString() + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + storedProcedureId.getStoredProcedureId().toString(); } else if (isFeed && resourceType == ResourceType.UserDefinedFunction) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + 
Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.UserDefinedFunction) { ResourceId functionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + functionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + functionId.getDocumentCollectionId().toString() + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + functionId.getUserDefinedFunctionId().toString(); } else if (isFeed && resourceType == ResourceType.Trigger) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.TRIGGERS_PATH_SEGMENT; } else if (resourceType == ResourceType.Trigger) { ResourceId triggerId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + triggerId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + triggerId.getDocumentCollectionId().toString() + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + triggerId.getTriggerId().toString(); } else if (isFeed && resourceType == ResourceType.Conflict) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.CONFLICTS_PATH_SEGMENT; } else if (resourceType == ResourceType.Conflict) { ResourceId conflictId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + conflictId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + conflictId.getDocumentCollectionId().toString() + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + conflictId.getConflictId().toString(); } else if (isFeed && resourceType == ResourceType.PartitionKeyRange) { 
ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT; } else if (resourceType == ResourceType.PartitionKeyRange) { ResourceId partitionKeyRangeId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + partitionKeyRangeId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + partitionKeyRangeId.getDocumentCollectionId().toString() + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT + "/" + partitionKeyRangeId.getPartitionKeyRangeId().toString(); } else if (isFeed && resourceType == ResourceType.Attachment) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentId().toString() + "/" + Paths.ATTACHMENTS_PATH_SEGMENT; } else if (resourceType == ResourceType.Attachment) { ResourceId attachmentId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + attachmentId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + attachmentId.getDocumentCollectionId().toString() + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + attachmentId.getDocumentId().toString() + "/" + Paths.ATTACHMENTS_PATH_SEGMENT + "/" + attachmentId.getAttachmentId().toString(); } else if (isFeed && resourceType == ResourceType.User) { return Paths.DATABASES_PATH_SEGMENT + "/" + ownerOrResourceId + "/" + Paths.USERS_PATH_SEGMENT; } else if (resourceType == ResourceType.User) { ResourceId userId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + 
userId.getDatabaseId().toString() + "/" + Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString(); } else if (isFeed && resourceType == ResourceType.Permission) { ResourceId userId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + userId.getDatabaseId().toString() + "/" + Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString() + "/" + Paths.PERMISSIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.Permission) { ResourceId permissionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + permissionId.getDatabaseId().toString() + "/" + Paths.USERS_PATH_SEGMENT + "/" + permissionId.getUserId().toString() + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + permissionId.getPermissionId().toString(); } else if (isFeed && resourceType == ResourceType.Document) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.DOCUMENTS_PATH_SEGMENT; } else if (resourceType == ResourceType.Document) { ResourceId documentId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentId.getDocumentCollectionId().toString() + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentId.getDocumentId().toString(); } else if (isFeed && resourceType == ResourceType.MasterPartition) { return Paths.PARTITIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.MasterPartition) { return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId; } else if (isFeed && resourceType == ResourceType.ServerPartition) { return Paths.PARTITIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.ServerPartition) { return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId; } else if 
(isFeed && resourceType == ResourceType.Topology) { return Paths.TOPOLOGY_PATH_SEGMENT; } else if (resourceType == ResourceType.Topology) { return Paths.TOPOLOGY_PATH_SEGMENT + "/" + ownerOrResourceId; } else if (isFeed && resourceType == ResourceType.DatabaseAccount) { return Paths.DATABASE_ACCOUNT_PATH_SEGMENT; } else if (resourceType == ResourceType.DatabaseAccount) { return Paths.DATABASE_ACCOUNT_PATH_SEGMENT + "/" + ownerOrResourceId; } else if (resourceType == ResourceType.ClientEncryptionKey) { ResourceId clientEncryptionKeyId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + clientEncryptionKeyId.getDatabaseId().toString() + "/" + Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT + "/" + clientEncryptionKeyId.getClientEncryptionKeyId().toString(); } else if (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.OPERATIONS_PATH_SEGMENT + "/" + Paths.PARTITION_KEY_DELETE_PATH_SEGMENT; } String errorMessage = "invalid resource type"; throw new IllegalStateException(errorMessage); } public static PathInfo parsePathSegments(String resourceUrl) { String[] segments = StringUtils.strip(resourceUrl, "/").split("/"); if (segments == null || segments.length < 1) { return null; } int uriSegmentsCount = segments.length; String segmentOne = StringUtils.strip(segments[uriSegmentsCount - 1], "/"); String segmentTwo = (uriSegmentsCount >= 2) ? 
StringUtils.strip(segments[uriSegmentsCount - 2], "/") : StringUtils.EMPTY; if (uriSegmentsCount >= 2) { if (Paths.MEDIA_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.OFFERS_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.PARTITIONS_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.DATABASE_ACCOUNT_PATH_SEGMENT.compareTo(segments[0]) != 0) { Pair<Boolean, ResourceId> result = ResourceId.tryParse(segments[1]); if (!result.getLeft() || !result.getRight().isDatabaseId()) { return parseNameSegments(resourceUrl, segments); } } } if ((uriSegmentsCount % 2 != 0) && isResourceType(segmentOne)) { return new PathInfo(true, segmentOne, segmentOne.compareToIgnoreCase(Paths.DATABASES_PATH_SEGMENT) != 0 ? segmentTwo : StringUtils.EMPTY, false); } else if (isResourceType(segmentTwo)) { return new PathInfo(false, segmentTwo, segmentOne, false); } return null; } /** * Method which will return boolean based on whether it is able to parse the * path and name segment from resource url , and fill info in PathInfo object * @param resourceUrl Complete ResourceLink * @param pathInfo Path info object which will hold information * @param clientVersion The Client version * @return */ public static boolean tryParsePathSegments(String resourceUrl, PathInfo pathInfo, String clientVersion) { pathInfo.resourcePath = StringUtils.EMPTY; pathInfo.resourceIdOrFullName = StringUtils.EMPTY; pathInfo.isFeed = false; pathInfo.isNameBased = false; if (StringUtils.isEmpty(resourceUrl)) { return false; } String trimmedStr = StringUtils.strip(resourceUrl, Constants.Properties.PATH_SEPARATOR); String[] segments = StringUtils.split(trimmedStr, Constants.Properties.PATH_SEPARATOR); if (segments == null || segments.length < 1) { return false; } int uriSegmentsCount = segments.length; String segmentOne = segments[uriSegmentsCount - 1]; String segmentTwo = (uriSegmentsCount >= 2) ? 
segments[uriSegmentsCount - 2] : StringUtils.EMPTY; if (uriSegmentsCount >= 2) { if (Paths.MEDIA_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.OFFERS_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.PARTITIONS_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.DATABASE_ACCOUNT_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.TOPOLOGY_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.RID_RANGE_PATH_SEGMENT.compareTo(segments[0]) != 0) { Pair<Boolean, ResourceId> result = ResourceId.tryParse(segments[1]); if (!result.getLeft() || !result.getRight().isDatabaseId()) { pathInfo.isNameBased = true; return tryParseNameSegments(resourceUrl, segments, pathInfo); } } } if ((uriSegmentsCount % 2 != 0) && PathsHelper.isResourceType(segmentOne)) { pathInfo.isFeed = true; pathInfo.resourcePath = segmentOne; if (!segmentOne.equalsIgnoreCase(Paths.DATABASES_PATH_SEGMENT)) { pathInfo.resourceIdOrFullName = segmentTwo; } } else if (PathsHelper.isResourceType(segmentTwo)) { pathInfo.isFeed = false; pathInfo.resourcePath = segmentTwo; pathInfo.resourceIdOrFullName = segmentOne; if (!StringUtils.isEmpty(clientVersion) && pathInfo.resourcePath.equalsIgnoreCase(Paths.MEDIA_PATH_SEGMENT)) { } } else { return false; } return true; } /** * Method which will return boolean based on whether it is able to parse the * name segment from resource url , and fill info in PathInfo object * @param resourceUrl Complete ResourceLink * @param segments * @param pathInfo Path info object which will hold information * @return */ private static boolean tryParseNameSegments(String resourceUrl, String[] segments, PathInfo pathInfo) { pathInfo.isFeed = false; pathInfo.resourceIdOrFullName = ""; pathInfo.resourcePath = ""; if (segments == null || segments.length < 1) { return false; } if (segments.length % 2 == 0) { if (isResourceType(segments[segments.length - 2])) { pathInfo.resourcePath = segments[segments.length - 2]; pathInfo.resourceIdOrFullName = unescapeJavaAndTrim(resourceUrl); return true; } } 
else { if (isResourceType(segments[segments.length - 1])) { pathInfo.isFeed = true; pathInfo.resourcePath = segments[segments.length - 1]; String resourceIdOrFullName = resourceUrl.substring(0, StringUtils.removeEnd(resourceUrl,Paths.ROOT).lastIndexOf(Paths.ROOT)); pathInfo.resourceIdOrFullName = unescapeJavaAndTrim(resourceIdOrFullName); return true; } } return false; } public static PathInfo parseNameSegments(String resourceUrl, String[] segments) { if (segments == null || segments.length < 1) { return null; } if (segments.length % 2 == 0) { if (isResourceType(segments[segments.length - 2])) { return new PathInfo(false, segments[segments.length - 2], unescapeJavaAndTrim(resourceUrl), true); } } else { if (isResourceType(segments[segments.length - 1])) { return new PathInfo(true, segments[segments.length - 1], unescapeJavaAndTrim( resourceUrl.substring(0, StringUtils.removeEnd(resourceUrl, Paths.ROOT).lastIndexOf(Paths.ROOT))), true); } } return null; } public static String unescapeJavaAndTrim(String resourceUrl) { if (resourceUrl == null) { return null; } int startInclusiveIndex = 0; while (startInclusiveIndex < resourceUrl.length() && resourceUrl.charAt(startInclusiveIndex) == Paths.ROOT_CHAR) { startInclusiveIndex++; } if (startInclusiveIndex == resourceUrl.length()) { return ""; } int endExclusiveIndex = resourceUrl.length(); while (endExclusiveIndex > startInclusiveIndex && resourceUrl.charAt(endExclusiveIndex - 1) == Paths.ROOT_CHAR) { endExclusiveIndex--; } for (int startLoopIndex = startInclusiveIndex; startLoopIndex < endExclusiveIndex; startLoopIndex++) { if (resourceUrl.charAt(startLoopIndex)== Paths.ESCAPE_CHAR) { return StringEscapeUtils.unescapeJava(StringUtils.strip(resourceUrl, Paths.ROOT)); } } if (startInclusiveIndex == 0 && endExclusiveIndex == resourceUrl.length()) { return resourceUrl; } return resourceUrl.substring(startInclusiveIndex, endExclusiveIndex); } private static boolean isResourceType(String resourcePathSegment) { if 
(StringUtils.isEmpty(resourcePathSegment)) { return false; } switch (resourcePathSegment.toLowerCase(Locale.ROOT)) { case Paths.ATTACHMENTS_PATH_SEGMENT: case Paths.COLLECTIONS_PATH_SEGMENT: case Paths.DATABASES_PATH_SEGMENT: case Paths.PERMISSIONS_PATH_SEGMENT: case Paths.USERS_PATH_SEGMENT: case Paths.DOCUMENTS_PATH_SEGMENT: case Paths.STORED_PROCEDURES_PATH_SEGMENT: case Paths.TRIGGERS_PATH_SEGMENT: case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT: case Paths.CONFLICTS_PATH_SEGMENT: case Paths.MEDIA_PATH_SEGMENT: case Paths.OFFERS_PATH_SEGMENT: case Paths.PARTITIONS_PATH_SEGMENT: case Paths.DATABASE_ACCOUNT_PATH_SEGMENT: case Paths.TOPOLOGY_PATH_SEGMENT: case Paths.PARTITION_KEY_RANGES_PATH_SEGMENT: case Paths.SCHEMAS_PATH_SEGMENT: case Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT: return true; default: return false; } } public static String generatePathForNameBased(ResourceType resourceType, String resourceOwnerFullName, String resourceName) { switch (resourceType) { case Database: return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName; case DocumentCollection: return resourceOwnerFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + resourceName; case StoredProcedure: return resourceOwnerFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + resourceName; case UserDefinedFunction: return resourceOwnerFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + resourceName; case Trigger: return resourceOwnerFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + resourceName; case Attachment: return resourceOwnerFullName + "/" + Paths.ATTACHMENTS_PATH_SEGMENT + "/" + resourceName; case Conflict: return resourceOwnerFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + resourceName; case Document: return resourceOwnerFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + resourceName; case Offer: return resourceOwnerFullName + "/" + Paths.OFFERS_PATH_SEGMENT + "/" + resourceName; case Permission: return resourceOwnerFullName + "/" + 
Paths.PERMISSIONS_PATH_SEGMENT + "/" + resourceName; case User: return resourceOwnerFullName + "/" + Paths.USERS_PATH_SEGMENT + "/" + resourceName; case PartitionKeyRange: return resourceOwnerFullName + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT + "/" + resourceName; default: return null; } } public static String getDatabasePath(String resourceFullName) { if (resourceFullName != null) { int index = indexOfNth(resourceFullName, '/', 2); if (index > 0) return resourceFullName.substring(0, index); } return resourceFullName; } public static String getParentByIndex(String resourceFullName, int segmentIndex) { int index = indexOfNth(resourceFullName, '/', segmentIndex); if (index > 0) return resourceFullName.substring(0, index); else { index = indexOfNth(resourceFullName, '/', segmentIndex - 1); if (index > 0) return resourceFullName; else return null; } } public static boolean isNameBased(String resourceIdOrFullName) { if (resourceIdOrFullName != null && !resourceIdOrFullName.isEmpty() && resourceIdOrFullName.length() > 4 && resourceIdOrFullName.charAt(3) == '/') { return true; } return false; } private static int indexOfNth(String str, char value, int nthOccurance) { int remaining = nthOccurance; char[] characters = str.toCharArray(); for (int i = 0; i < characters.length; i++) { if (characters[i] == value) { remaining--; if (remaining == 0) { return i; } } } return -1; } public static ResourceType getResourcePathSegment(String resourcePathSegment) throws BadRequestException { if (StringUtils.isEmpty(resourcePathSegment)) { String message = String.format(RMResources.StringArgumentNullOrEmpty, "resourcePathSegment"); throw new BadRequestException(message); } switch (resourcePathSegment) { case Paths.ATTACHMENTS_PATH_SEGMENT: return ResourceType.Attachment; case Paths.COLLECTIONS_PATH_SEGMENT: return ResourceType.DocumentCollection; case Paths.DATABASES_PATH_SEGMENT: return ResourceType.Database; case Paths.PERMISSIONS_PATH_SEGMENT: return ResourceType.Permission; 
case Paths.USERS_PATH_SEGMENT: return ResourceType.User; case Paths.DOCUMENTS_PATH_SEGMENT: return ResourceType.Document; case Paths.STORED_PROCEDURES_PATH_SEGMENT: return ResourceType.StoredProcedure; case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT: return ResourceType.UserDefinedFunction; case Paths.TRIGGERS_PATH_SEGMENT: return ResourceType.Trigger; case Paths.CONFLICTS_PATH_SEGMENT: return ResourceType.Conflict; case Paths.OFFERS_PATH_SEGMENT: return ResourceType.Offer; case Paths.SCHEMAS_PATH_SEGMENT: return ResourceType.Schema; } String errorMessage = String.format(RMResources.UnknownResourceType, resourcePathSegment); throw new BadRequestException(errorMessage); } public static String getResourcePath(ResourceType resourceType) throws BadRequestException { switch (resourceType) { case Database: return Paths.DATABASES_PATH_SEGMENT; case DocumentCollection: return Paths.COLLECTIONS_PATH_SEGMENT; case Document: return Paths.DOCUMENTS_PATH_SEGMENT; case StoredProcedure: return Paths.STORED_PROCEDURES_PATH_SEGMENT; case UserDefinedFunction: return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT; case Trigger: return Paths.TRIGGERS_PATH_SEGMENT; case Conflict: return Paths.CONFLICTS_PATH_SEGMENT; case Attachment: return Paths.ATTACHMENTS_PATH_SEGMENT; case User: return Paths.USERS_PATH_SEGMENT; case Permission: return Paths.PERMISSIONS_PATH_SEGMENT; case Offer: return Paths.OFFERS_PATH_SEGMENT; case MasterPartition: case ServerPartition: return Paths.PARTITIONS_PATH_SEGMENT; case PartitionKeyRange: return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT; case Media: return Paths.MEDIA_ROOT; case Schema: return Paths.SCHEMAS_PATH_SEGMENT; case DatabaseAccount: case Topology: return Paths.ROOT; default: String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString()); throw new BadRequestException(errorMessage); } } public static boolean validateResourceFullName(ResourceType resourceType, String resourceFullName) { String[] segments = 
StringUtils.split(resourceFullName, '/'); String[] resourcePathArray = getResourcePathArray(resourceType); if (resourcePathArray == null) { return false; } if (segments.length != resourcePathArray.length * 2) { return false; } for (int i = 0; i < resourcePathArray.length; i++) { if(resourcePathArray[i].compareTo(segments[2 * i]) != 0) { return false; } } return true; } private static String[] getResourcePathArray(ResourceType resourceType) { List<String> segments = new ArrayList<String>(); segments.add(Paths.DATABASES_PATH_SEGMENT); if (resourceType == ResourceType.Permission || resourceType == ResourceType.User) { segments.add(Paths.USERS_PATH_SEGMENT); if (resourceType == ResourceType.Permission) { segments.add(Paths.PERMISSIONS_PATH_SEGMENT); } } else if (resourceType == ResourceType.ClientEncryptionKey) { segments.add(Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT); } else if (resourceType == ResourceType.DocumentCollection || resourceType == ResourceType.StoredProcedure || resourceType == ResourceType.UserDefinedFunction || resourceType == ResourceType.Trigger || resourceType == ResourceType.Conflict || resourceType == ResourceType.Attachment || resourceType == ResourceType.Document || resourceType == ResourceType.PartitionKeyRange || resourceType == ResourceType.Schema) { segments.add(Paths.COLLECTIONS_PATH_SEGMENT); if (resourceType == ResourceType.StoredProcedure) { segments.add(Paths.STORED_PROCEDURES_PATH_SEGMENT); } else if(resourceType == ResourceType.UserDefinedFunction) { segments.add(Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); } else if(resourceType == ResourceType.Trigger) { segments.add(Paths.TRIGGERS_PATH_SEGMENT); } else if (resourceType == ResourceType.Conflict) { segments.add(Paths.CONFLICTS_PATH_SEGMENT); } else if (resourceType == ResourceType.Schema) { segments.add(Paths.SCHEMAS_PATH_SEGMENT); } else if(resourceType == ResourceType.Document || resourceType == ResourceType.Attachment) { segments.add(Paths.DOCUMENTS_PATH_SEGMENT); if (resourceType 
== ResourceType.Attachment) { segments.add(Paths.ATTACHMENTS_PATH_SEGMENT); } } else if(resourceType == ResourceType.PartitionKeyRange) { segments.add(Paths.PARTITION_KEY_RANGES_PATH_SEGMENT); } else if (resourceType == ResourceType.PartitionKey) { segments.add(Paths.COLLECTIONS_PATH_SEGMENT); segments.add(Paths.OPERATIONS_PATH_SEGMENT); } } else if (resourceType != ResourceType.Database) { return null; } return segments.stream().toArray(String[]::new); } public static boolean validateResourceId(ResourceType resourceType, String resourceId) { if (resourceType == ResourceType.Conflict) { return PathsHelper.validateConflictId(resourceId); } else if (resourceType == ResourceType.Database) { return PathsHelper.validateDatabaseId(resourceId); } else if (resourceType == ResourceType.DocumentCollection) { return PathsHelper.validateDocumentCollectionId(resourceId); } else if (resourceType == ResourceType.Document) { return PathsHelper.validateDocumentId(resourceId); } else if (resourceType == ResourceType.Permission) { return PathsHelper.validatePermissionId(resourceId); } else if (resourceType == ResourceType.StoredProcedure) { return PathsHelper.validateStoredProcedureId(resourceId); } else if (resourceType == ResourceType.Trigger) { return PathsHelper.validateTriggerId(resourceId); } else if (resourceType == ResourceType.UserDefinedFunction) { return PathsHelper.validateUserDefinedFunctionId(resourceId); } else if (resourceType == ResourceType.User) { return PathsHelper.validateUserId(resourceId); } else if (resourceType == ResourceType.Attachment) { return PathsHelper.validateAttachmentId(resourceId); } else if (resourceType == ResourceType.ClientEncryptionKey) { return PathsHelper.validateClientEncryptionKeyId(resourceId); }else { logger.error(String.format("ValidateResourceId not implemented for Type %s in ResourceRequestHandler", resourceType.toString())); return false; } } public static boolean validateDatabaseId(String resourceIdString) { Pair<Boolean, 
ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getDatabase() != 0; } public static boolean validateDocumentCollectionId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getDocumentCollection() != 0; } public static boolean validateDocumentId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getDocument() != 0; } public static boolean validateConflictId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getConflict() != 0; } public static boolean validateAttachmentId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getAttachment() != 0; } public static boolean validatePermissionId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getPermission() != 0; } public static boolean validateStoredProcedureId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getStoredProcedure() != 0; } public static boolean validateTriggerId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getTrigger() != 0; } public static boolean validateUserDefinedFunctionId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getUserDefinedFunction() != 0; } public static boolean validateUserId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getUser() != 0; 
} public static boolean validateClientEncryptionKeyId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getClientEncryptionKey() != 0; } public static boolean isPublicResource(Resource resourceType) { if (resourceType instanceof Database || resourceType instanceof DocumentCollection || resourceType instanceof StoredProcedure || resourceType instanceof UserDefinedFunction || resourceType instanceof Trigger || resourceType instanceof Conflict || resourceType instanceof User || resourceType instanceof Permission || resourceType instanceof Document || resourceType instanceof Offer ) { return true; } else { return false; } } }
You might need an else in the if-block in 587/588 - but I don't quite see why switching to a StringBuilder here?
/**
 * Returns the collection path prefix of a resource full name, i.e. the
 * {@code dbs/{db}/colls/{coll}} portion: everything before the 4th {@code '/'}
 * of the name after leading/trailing slashes have been trimmed.
 *
 * @param resourceFullName the full resource name (may be {@code null}).
 * @return the trimmed collection path prefix; the whole trimmed name when it
 *         has fewer than four {@code '/'} separators; {@code null} when the
 *         input is {@code null}.
 */
public static String getCollectionPath(String resourceFullName) {
    if (resourceFullName != null) {
        // No mutation beyond this single assignment, so a plain String is
        // sufficient — a StringBuilder here would be a pointless allocation.
        String trimmedResourceFullName = Utils.trimBeginningAndEndingSlashes(resourceFullName);
        int index = indexOfNth(trimmedResourceFullName, '/', 4);
        if (index > 0) {
            return trimmedResourceFullName.substring(0, index);
        }
        return trimmedResourceFullName;
    }
    return resourceFullName;
}
StringBuilder trimmedResourceFullName = new StringBuilder();
/**
 * Returns the collection path prefix of a resource full name, i.e. the
 * {@code dbs/{db}/colls/{coll}} portion: everything before the 4th {@code '/'}
 * of the name after leading/trailing slashes have been trimmed.
 *
 * @param resourceFullName the full resource name (may be {@code null}).
 * @return the trimmed collection path prefix; the whole trimmed name when it
 *         has fewer than four {@code '/'} separators; {@code null} when the
 *         input is {@code null}.
 */
public static String getCollectionPath(String resourceFullName) {
    if (resourceFullName != null) {
        String trimmedResourceFullName = Utils.trimBeginningAndEndingSlashes(resourceFullName);
        // trimmedResourceFullName is already a String — no redundant toString() needed.
        int index = indexOfNth(trimmedResourceFullName, '/', 4);
        if (index > 0) {
            return trimmedResourceFullName.substring(0, index);
        } else {
            return trimmedResourceFullName;
        }
    }
    return resourceFullName;
}
class PathsHelper { private final static Logger logger = LoggerFactory.getLogger(PathsHelper.class); public static String generatePath(ResourceType resourceType, RxDocumentServiceRequest request, boolean isFeed) { if (request.getIsNameBased()) { return generatePathForNameBased(resourceType, request.getResourceAddress(), isFeed, request.getOperationType()); } else { return generatePath(resourceType, request.getResourceId(), isFeed, request.getOperationType()); } } public static String generatePathForNameBased(Resource resourceType, String resourceOwnerFullName, String resourceName) { if (resourceName == null) return null; if (resourceType instanceof Database) { return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName; } else if (resourceOwnerFullName == null) { return null; } else if (resourceType instanceof DocumentCollection) { return resourceOwnerFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof StoredProcedure) { return resourceOwnerFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof UserDefinedFunction) { return resourceOwnerFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof Trigger) { return resourceOwnerFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof Conflict) { return resourceOwnerFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof User) { return resourceOwnerFullName + "/" + Paths.USERS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof Permission) { return resourceOwnerFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof Document) { return resourceOwnerFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof Offer) { return Paths.OFFERS_PATH_SEGMENT + "/" + 
resourceName; } else if (resourceType instanceof Resource) { return null; } String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString()); assert false : errorMessage; throw new IllegalArgumentException(errorMessage); } private static String generatePathForNameBased(ResourceType resourceType, String resourceFullName, boolean isFeed, OperationType operationType) { if (isFeed && Strings.isNullOrEmpty(resourceFullName) && resourceType != ResourceType.Database) { String errorMessage = String.format(RMResources.UnexpectedResourceType, resourceType); throw new IllegalArgumentException(errorMessage); } String resourcePath = null; if (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) { resourcePath = resourceFullName + "/" + Paths.OPERATIONS_PATH_SEGMENT + "/" + Paths.PARTITION_KEY_DELETE_PATH_SEGMENT; } else if (!isFeed) { resourcePath = resourceFullName; } else if (resourceType == ResourceType.Database) { return Paths.DATABASES_PATH_SEGMENT; } else if (resourceType == ResourceType.DocumentCollection) { resourcePath = resourceFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.StoredProcedure) { resourcePath = resourceFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT; } else if (resourceType == ResourceType.UserDefinedFunction) { resourcePath = resourceFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.Trigger) { resourcePath = resourceFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT; } else if (resourceType == ResourceType.Conflict) { resourcePath = resourceFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT; } else if (resourceType == ResourceType.Attachment) { resourcePath = resourceFullName + "/" + Paths.ATTACHMENTS_PATH_SEGMENT; } else if (resourceType == ResourceType.User) { resourcePath = resourceFullName + "/" + Paths.USERS_PATH_SEGMENT; } else if (resourceType == ResourceType.Permission) { resourcePath = 
resourceFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.Document) { resourcePath = resourceFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT; } else if (resourceType == ResourceType.Offer) { return resourceFullName + "/" + Paths.OFFERS_PATH_SEGMENT; } else if (resourceType == ResourceType.PartitionKeyRange) { return resourceFullName + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT; } else if (resourceType == ResourceType.Schema) { resourcePath = resourceFullName + "/" + Paths.SCHEMAS_PATH_SEGMENT; } else if (resourceType == ResourceType.ClientEncryptionKey) { resourcePath = resourceFullName + "/" + Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT; } else { String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString()); assert false : errorMessage; throw new IllegalArgumentException(errorMessage); } return resourcePath; } public static String generatePath(ResourceType resourceType, String ownerOrResourceId, boolean isFeed) { if (resourceType == ResourceType.PartitionKey) { return generatePath(resourceType, ownerOrResourceId, isFeed, OperationType.Delete); } else { return generatePath(resourceType, ownerOrResourceId, isFeed, null); } } private static String generatePath(ResourceType resourceType, String ownerOrResourceId, boolean isFeed, OperationType operationType) { if (isFeed && (ownerOrResourceId == null || ownerOrResourceId.isEmpty()) && resourceType != ResourceType.Database && resourceType != ResourceType.Offer && resourceType != ResourceType.MasterPartition && resourceType != ResourceType.ServerPartition && resourceType != ResourceType.DatabaseAccount && resourceType != ResourceType.Topology) { throw new IllegalStateException("INVALID resource type"); } if(ownerOrResourceId == null) { ownerOrResourceId = StringUtils.EMPTY; } if (isFeed && resourceType == ResourceType.Database) { return Paths.DATABASES_PATH_SEGMENT; } else if (resourceType == ResourceType.Database) { return Paths.DATABASES_PATH_SEGMENT 
+ "/" + ownerOrResourceId; } else if (isFeed && resourceType == ResourceType.DocumentCollection) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.DocumentCollection) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString(); } else if (isFeed && resourceType == ResourceType.Offer) { return Paths.OFFERS_PATH_SEGMENT; } else if (resourceType == ResourceType.Offer) { return Paths.OFFERS_PATH_SEGMENT + "/" + ownerOrResourceId; } else if (isFeed && resourceType == ResourceType.StoredProcedure) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT; } else if (resourceType == ResourceType.StoredProcedure) { ResourceId storedProcedureId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + storedProcedureId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + storedProcedureId.getDocumentCollectionId().toString() + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + storedProcedureId.getStoredProcedureId().toString(); } else if (isFeed && resourceType == ResourceType.UserDefinedFunction) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + 
Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.UserDefinedFunction) { ResourceId functionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + functionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + functionId.getDocumentCollectionId().toString() + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + functionId.getUserDefinedFunctionId().toString(); } else if (isFeed && resourceType == ResourceType.Trigger) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.TRIGGERS_PATH_SEGMENT; } else if (resourceType == ResourceType.Trigger) { ResourceId triggerId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + triggerId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + triggerId.getDocumentCollectionId().toString() + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + triggerId.getTriggerId().toString(); } else if (isFeed && resourceType == ResourceType.Conflict) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.CONFLICTS_PATH_SEGMENT; } else if (resourceType == ResourceType.Conflict) { ResourceId conflictId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + conflictId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + conflictId.getDocumentCollectionId().toString() + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + conflictId.getConflictId().toString(); } else if (isFeed && resourceType == ResourceType.PartitionKeyRange) { 
ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT; } else if (resourceType == ResourceType.PartitionKeyRange) { ResourceId partitionKeyRangeId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + partitionKeyRangeId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + partitionKeyRangeId.getDocumentCollectionId().toString() + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT + "/" + partitionKeyRangeId.getPartitionKeyRangeId().toString(); } else if (isFeed && resourceType == ResourceType.Attachment) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentId().toString() + "/" + Paths.ATTACHMENTS_PATH_SEGMENT; } else if (resourceType == ResourceType.Attachment) { ResourceId attachmentId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + attachmentId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + attachmentId.getDocumentCollectionId().toString() + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + attachmentId.getDocumentId().toString() + "/" + Paths.ATTACHMENTS_PATH_SEGMENT + "/" + attachmentId.getAttachmentId().toString(); } else if (isFeed && resourceType == ResourceType.User) { return Paths.DATABASES_PATH_SEGMENT + "/" + ownerOrResourceId + "/" + Paths.USERS_PATH_SEGMENT; } else if (resourceType == ResourceType.User) { ResourceId userId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + 
userId.getDatabaseId().toString() + "/" + Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString(); } else if (isFeed && resourceType == ResourceType.Permission) { ResourceId userId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + userId.getDatabaseId().toString() + "/" + Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString() + "/" + Paths.PERMISSIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.Permission) { ResourceId permissionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + permissionId.getDatabaseId().toString() + "/" + Paths.USERS_PATH_SEGMENT + "/" + permissionId.getUserId().toString() + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + permissionId.getPermissionId().toString(); } else if (isFeed && resourceType == ResourceType.Document) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.DOCUMENTS_PATH_SEGMENT; } else if (resourceType == ResourceType.Document) { ResourceId documentId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentId.getDocumentCollectionId().toString() + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentId.getDocumentId().toString(); } else if (isFeed && resourceType == ResourceType.MasterPartition) { return Paths.PARTITIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.MasterPartition) { return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId; } else if (isFeed && resourceType == ResourceType.ServerPartition) { return Paths.PARTITIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.ServerPartition) { return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId; } else if 
(isFeed && resourceType == ResourceType.Topology) { return Paths.TOPOLOGY_PATH_SEGMENT; } else if (resourceType == ResourceType.Topology) { return Paths.TOPOLOGY_PATH_SEGMENT + "/" + ownerOrResourceId; } else if (isFeed && resourceType == ResourceType.DatabaseAccount) { return Paths.DATABASE_ACCOUNT_PATH_SEGMENT; } else if (resourceType == ResourceType.DatabaseAccount) { return Paths.DATABASE_ACCOUNT_PATH_SEGMENT + "/" + ownerOrResourceId; } else if (resourceType == ResourceType.ClientEncryptionKey) { ResourceId clientEncryptionKeyId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + clientEncryptionKeyId.getDatabaseId().toString() + "/" + Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT + "/" + clientEncryptionKeyId.getClientEncryptionKeyId().toString(); } else if (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.OPERATIONS_PATH_SEGMENT + "/" + Paths.PARTITION_KEY_DELETE_PATH_SEGMENT; } String errorMessage = "invalid resource type"; throw new IllegalStateException(errorMessage); } public static PathInfo parsePathSegments(String resourceUrl) { String[] segments = StringUtils.strip(resourceUrl, "/").split("/"); if (segments == null || segments.length < 1) { return null; } int uriSegmentsCount = segments.length; String segmentOne = StringUtils.strip(segments[uriSegmentsCount - 1], "/"); String segmentTwo = (uriSegmentsCount >= 2) ? 
StringUtils.strip(segments[uriSegmentsCount - 2], "/") : StringUtils.EMPTY; if (uriSegmentsCount >= 2) { if (Paths.MEDIA_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.OFFERS_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.PARTITIONS_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.DATABASE_ACCOUNT_PATH_SEGMENT.compareTo(segments[0]) != 0) { Pair<Boolean, ResourceId> result = ResourceId.tryParse(segments[1]); if (!result.getLeft() || !result.getRight().isDatabaseId()) { return parseNameSegments(resourceUrl, segments); } } } if ((uriSegmentsCount % 2 != 0) && isResourceType(segmentOne)) { return new PathInfo(true, segmentOne, segmentOne.compareToIgnoreCase(Paths.DATABASES_PATH_SEGMENT) != 0 ? segmentTwo : StringUtils.EMPTY, false); } else if (isResourceType(segmentTwo)) { return new PathInfo(false, segmentTwo, segmentOne, false); } return null; } /** * Method which will return boolean based on whether it is able to parse the * path and name segment from resource url , and fill info in PathInfo object * @param resourceUrl Complete ResourceLink * @param pathInfo Path info object which will hold information * @param clientVersion The Client version * @return */ public static boolean tryParsePathSegments(String resourceUrl, PathInfo pathInfo, String clientVersion) { pathInfo.resourcePath = StringUtils.EMPTY; pathInfo.resourceIdOrFullName = StringUtils.EMPTY; pathInfo.isFeed = false; pathInfo.isNameBased = false; if (StringUtils.isEmpty(resourceUrl)) { return false; } String trimmedStr = StringUtils.strip(resourceUrl, Constants.Properties.PATH_SEPARATOR); String[] segments = StringUtils.split(trimmedStr, Constants.Properties.PATH_SEPARATOR); if (segments == null || segments.length < 1) { return false; } int uriSegmentsCount = segments.length; String segmentOne = segments[uriSegmentsCount - 1]; String segmentTwo = (uriSegmentsCount >= 2) ? 
segments[uriSegmentsCount - 2] : StringUtils.EMPTY; if (uriSegmentsCount >= 2) { if (Paths.MEDIA_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.OFFERS_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.PARTITIONS_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.DATABASE_ACCOUNT_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.TOPOLOGY_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.RID_RANGE_PATH_SEGMENT.compareTo(segments[0]) != 0) { Pair<Boolean, ResourceId> result = ResourceId.tryParse(segments[1]); if (!result.getLeft() || !result.getRight().isDatabaseId()) { pathInfo.isNameBased = true; return tryParseNameSegments(resourceUrl, segments, pathInfo); } } } if ((uriSegmentsCount % 2 != 0) && PathsHelper.isResourceType(segmentOne)) { pathInfo.isFeed = true; pathInfo.resourcePath = segmentOne; if (!segmentOne.equalsIgnoreCase(Paths.DATABASES_PATH_SEGMENT)) { pathInfo.resourceIdOrFullName = segmentTwo; } } else if (PathsHelper.isResourceType(segmentTwo)) { pathInfo.isFeed = false; pathInfo.resourcePath = segmentTwo; pathInfo.resourceIdOrFullName = segmentOne; if (!StringUtils.isEmpty(clientVersion) && pathInfo.resourcePath.equalsIgnoreCase(Paths.MEDIA_PATH_SEGMENT)) { } } else { return false; } return true; } /** * Method which will return boolean based on whether it is able to parse the * name segment from resource url , and fill info in PathInfo object * @param resourceUrl Complete ResourceLink * @param segments * @param pathInfo Path info object which will hold information * @return */ private static boolean tryParseNameSegments(String resourceUrl, String[] segments, PathInfo pathInfo) { pathInfo.isFeed = false; pathInfo.resourceIdOrFullName = ""; pathInfo.resourcePath = ""; if (segments == null || segments.length < 1) { return false; } if (segments.length % 2 == 0) { if (isResourceType(segments[segments.length - 2])) { pathInfo.resourcePath = segments[segments.length - 2]; pathInfo.resourceIdOrFullName = unescapeJavaAndTrim(resourceUrl); return true; } } 
else { if (isResourceType(segments[segments.length - 1])) { pathInfo.isFeed = true; pathInfo.resourcePath = segments[segments.length - 1]; String resourceIdOrFullName = resourceUrl.substring(0, StringUtils.removeEnd(resourceUrl,Paths.ROOT).lastIndexOf(Paths.ROOT)); pathInfo.resourceIdOrFullName = unescapeJavaAndTrim(resourceIdOrFullName); return true; } } return false; } public static PathInfo parseNameSegments(String resourceUrl, String[] segments) { if (segments == null || segments.length < 1) { return null; } if (segments.length % 2 == 0) { if (isResourceType(segments[segments.length - 2])) { return new PathInfo(false, segments[segments.length - 2], unescapeJavaAndTrim(resourceUrl), true); } } else { if (isResourceType(segments[segments.length - 1])) { return new PathInfo(true, segments[segments.length - 1], unescapeJavaAndTrim( resourceUrl.substring(0, StringUtils.removeEnd(resourceUrl, Paths.ROOT).lastIndexOf(Paths.ROOT))), true); } } return null; } public static String unescapeJavaAndTrim(String resourceUrl) { if (resourceUrl == null) { return null; } int startInclusiveIndex = 0; while (startInclusiveIndex < resourceUrl.length() && resourceUrl.charAt(startInclusiveIndex) == Paths.ROOT_CHAR) { startInclusiveIndex++; } if (startInclusiveIndex == resourceUrl.length()) { return ""; } int endExclusiveIndex = resourceUrl.length(); while (endExclusiveIndex > startInclusiveIndex && resourceUrl.charAt(endExclusiveIndex - 1) == Paths.ROOT_CHAR) { endExclusiveIndex--; } for (int startLoopIndex = startInclusiveIndex; startLoopIndex < endExclusiveIndex; startLoopIndex++) { if (resourceUrl.charAt(startLoopIndex)== Paths.ESCAPE_CHAR) { return StringEscapeUtils.unescapeJava(StringUtils.strip(resourceUrl, Paths.ROOT)); } } if (startInclusiveIndex == 0 && endExclusiveIndex == resourceUrl.length()) { return resourceUrl; } return resourceUrl.substring(startInclusiveIndex, endExclusiveIndex); } private static boolean isResourceType(String resourcePathSegment) { if 
(StringUtils.isEmpty(resourcePathSegment)) { return false; } switch (resourcePathSegment.toLowerCase(Locale.ROOT)) { case Paths.ATTACHMENTS_PATH_SEGMENT: case Paths.COLLECTIONS_PATH_SEGMENT: case Paths.DATABASES_PATH_SEGMENT: case Paths.PERMISSIONS_PATH_SEGMENT: case Paths.USERS_PATH_SEGMENT: case Paths.DOCUMENTS_PATH_SEGMENT: case Paths.STORED_PROCEDURES_PATH_SEGMENT: case Paths.TRIGGERS_PATH_SEGMENT: case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT: case Paths.CONFLICTS_PATH_SEGMENT: case Paths.MEDIA_PATH_SEGMENT: case Paths.OFFERS_PATH_SEGMENT: case Paths.PARTITIONS_PATH_SEGMENT: case Paths.DATABASE_ACCOUNT_PATH_SEGMENT: case Paths.TOPOLOGY_PATH_SEGMENT: case Paths.PARTITION_KEY_RANGES_PATH_SEGMENT: case Paths.SCHEMAS_PATH_SEGMENT: case Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT: return true; default: return false; } } public static String generatePathForNameBased(ResourceType resourceType, String resourceOwnerFullName, String resourceName) { switch (resourceType) { case Database: return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName; case DocumentCollection: return resourceOwnerFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + resourceName; case StoredProcedure: return resourceOwnerFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + resourceName; case UserDefinedFunction: return resourceOwnerFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + resourceName; case Trigger: return resourceOwnerFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + resourceName; case Attachment: return resourceOwnerFullName + "/" + Paths.ATTACHMENTS_PATH_SEGMENT + "/" + resourceName; case Conflict: return resourceOwnerFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + resourceName; case Document: return resourceOwnerFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + resourceName; case Offer: return resourceOwnerFullName + "/" + Paths.OFFERS_PATH_SEGMENT + "/" + resourceName; case Permission: return resourceOwnerFullName + "/" + 
Paths.PERMISSIONS_PATH_SEGMENT + "/" + resourceName; case User: return resourceOwnerFullName + "/" + Paths.USERS_PATH_SEGMENT + "/" + resourceName; case PartitionKeyRange: return resourceOwnerFullName + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT + "/" + resourceName; default: return null; } } public static String getDatabasePath(String resourceFullName) { if (resourceFullName != null) { int index = indexOfNth(resourceFullName, '/', 2); if (index > 0) return resourceFullName.substring(0, index); } return resourceFullName; } public static String getParentByIndex(String resourceFullName, int segmentIndex) { int index = indexOfNth(resourceFullName, '/', segmentIndex); if (index > 0) return resourceFullName.substring(0, index); else { index = indexOfNth(resourceFullName, '/', segmentIndex - 1); if (index > 0) return resourceFullName; else return null; } } public static boolean isNameBased(String resourceIdOrFullName) { if (resourceIdOrFullName != null && !resourceIdOrFullName.isEmpty() && resourceIdOrFullName.length() > 4 && resourceIdOrFullName.charAt(3) == '/') { return true; } return false; } private static int indexOfNth(String str, char value, int nthOccurance) { int remaining = nthOccurance; char[] characters = str.toCharArray(); for (int i = 0; i < characters.length; i++) { if (characters[i] == value) { remaining--; if (remaining == 0) { return i; } } } return -1; } public static ResourceType getResourcePathSegment(String resourcePathSegment) throws BadRequestException { if (StringUtils.isEmpty(resourcePathSegment)) { String message = String.format(RMResources.StringArgumentNullOrEmpty, "resourcePathSegment"); throw new BadRequestException(message); } switch (resourcePathSegment) { case Paths.ATTACHMENTS_PATH_SEGMENT: return ResourceType.Attachment; case Paths.COLLECTIONS_PATH_SEGMENT: return ResourceType.DocumentCollection; case Paths.DATABASES_PATH_SEGMENT: return ResourceType.Database; case Paths.PERMISSIONS_PATH_SEGMENT: return ResourceType.Permission; 
case Paths.USERS_PATH_SEGMENT: return ResourceType.User; case Paths.DOCUMENTS_PATH_SEGMENT: return ResourceType.Document; case Paths.STORED_PROCEDURES_PATH_SEGMENT: return ResourceType.StoredProcedure; case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT: return ResourceType.UserDefinedFunction; case Paths.TRIGGERS_PATH_SEGMENT: return ResourceType.Trigger; case Paths.CONFLICTS_PATH_SEGMENT: return ResourceType.Conflict; case Paths.OFFERS_PATH_SEGMENT: return ResourceType.Offer; case Paths.SCHEMAS_PATH_SEGMENT: return ResourceType.Schema; } String errorMessage = String.format(RMResources.UnknownResourceType, resourcePathSegment); throw new BadRequestException(errorMessage); } public static String getResourcePath(ResourceType resourceType) throws BadRequestException { switch (resourceType) { case Database: return Paths.DATABASES_PATH_SEGMENT; case DocumentCollection: return Paths.COLLECTIONS_PATH_SEGMENT; case Document: return Paths.DOCUMENTS_PATH_SEGMENT; case StoredProcedure: return Paths.STORED_PROCEDURES_PATH_SEGMENT; case UserDefinedFunction: return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT; case Trigger: return Paths.TRIGGERS_PATH_SEGMENT; case Conflict: return Paths.CONFLICTS_PATH_SEGMENT; case Attachment: return Paths.ATTACHMENTS_PATH_SEGMENT; case User: return Paths.USERS_PATH_SEGMENT; case Permission: return Paths.PERMISSIONS_PATH_SEGMENT; case Offer: return Paths.OFFERS_PATH_SEGMENT; case MasterPartition: case ServerPartition: return Paths.PARTITIONS_PATH_SEGMENT; case PartitionKeyRange: return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT; case Media: return Paths.MEDIA_ROOT; case Schema: return Paths.SCHEMAS_PATH_SEGMENT; case DatabaseAccount: case Topology: return Paths.ROOT; default: String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString()); throw new BadRequestException(errorMessage); } } public static boolean validateResourceFullName(ResourceType resourceType, String resourceFullName) { String[] segments = 
StringUtils.split(resourceFullName, '/'); String[] resourcePathArray = getResourcePathArray(resourceType); if (resourcePathArray == null) { return false; } if (segments.length != resourcePathArray.length * 2) { return false; } for (int i = 0; i < resourcePathArray.length; i++) { if(resourcePathArray[i].compareTo(segments[2 * i]) != 0) { return false; } } return true; } private static String[] getResourcePathArray(ResourceType resourceType) { List<String> segments = new ArrayList<String>(); segments.add(Paths.DATABASES_PATH_SEGMENT); if (resourceType == ResourceType.Permission || resourceType == ResourceType.User) { segments.add(Paths.USERS_PATH_SEGMENT); if (resourceType == ResourceType.Permission) { segments.add(Paths.PERMISSIONS_PATH_SEGMENT); } } else if (resourceType == ResourceType.ClientEncryptionKey) { segments.add(Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT); } else if (resourceType == ResourceType.DocumentCollection || resourceType == ResourceType.StoredProcedure || resourceType == ResourceType.UserDefinedFunction || resourceType == ResourceType.Trigger || resourceType == ResourceType.Conflict || resourceType == ResourceType.Attachment || resourceType == ResourceType.Document || resourceType == ResourceType.PartitionKeyRange || resourceType == ResourceType.Schema) { segments.add(Paths.COLLECTIONS_PATH_SEGMENT); if (resourceType == ResourceType.StoredProcedure) { segments.add(Paths.STORED_PROCEDURES_PATH_SEGMENT); } else if(resourceType == ResourceType.UserDefinedFunction) { segments.add(Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); } else if(resourceType == ResourceType.Trigger) { segments.add(Paths.TRIGGERS_PATH_SEGMENT); } else if (resourceType == ResourceType.Conflict) { segments.add(Paths.CONFLICTS_PATH_SEGMENT); } else if (resourceType == ResourceType.Schema) { segments.add(Paths.SCHEMAS_PATH_SEGMENT); } else if(resourceType == ResourceType.Document || resourceType == ResourceType.Attachment) { segments.add(Paths.DOCUMENTS_PATH_SEGMENT); if (resourceType 
== ResourceType.Attachment) { segments.add(Paths.ATTACHMENTS_PATH_SEGMENT); } } else if(resourceType == ResourceType.PartitionKeyRange) { segments.add(Paths.PARTITION_KEY_RANGES_PATH_SEGMENT); } else if (resourceType == ResourceType.PartitionKey) { segments.add(Paths.COLLECTIONS_PATH_SEGMENT); segments.add(Paths.OPERATIONS_PATH_SEGMENT); } } else if (resourceType != ResourceType.Database) { return null; } return segments.stream().toArray(String[]::new); } public static boolean validateResourceId(ResourceType resourceType, String resourceId) { if (resourceType == ResourceType.Conflict) { return PathsHelper.validateConflictId(resourceId); } else if (resourceType == ResourceType.Database) { return PathsHelper.validateDatabaseId(resourceId); } else if (resourceType == ResourceType.DocumentCollection) { return PathsHelper.validateDocumentCollectionId(resourceId); } else if (resourceType == ResourceType.Document) { return PathsHelper.validateDocumentId(resourceId); } else if (resourceType == ResourceType.Permission) { return PathsHelper.validatePermissionId(resourceId); } else if (resourceType == ResourceType.StoredProcedure) { return PathsHelper.validateStoredProcedureId(resourceId); } else if (resourceType == ResourceType.Trigger) { return PathsHelper.validateTriggerId(resourceId); } else if (resourceType == ResourceType.UserDefinedFunction) { return PathsHelper.validateUserDefinedFunctionId(resourceId); } else if (resourceType == ResourceType.User) { return PathsHelper.validateUserId(resourceId); } else if (resourceType == ResourceType.Attachment) { return PathsHelper.validateAttachmentId(resourceId); } else if (resourceType == ResourceType.ClientEncryptionKey) { return PathsHelper.validateClientEncryptionKeyId(resourceId); }else { logger.error(String.format("ValidateResourceId not implemented for Type %s in ResourceRequestHandler", resourceType.toString())); return false; } } public static boolean validateDatabaseId(String resourceIdString) { Pair<Boolean, 
ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getDatabase() != 0; } public static boolean validateDocumentCollectionId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getDocumentCollection() != 0; } public static boolean validateDocumentId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getDocument() != 0; } public static boolean validateConflictId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getConflict() != 0; } public static boolean validateAttachmentId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getAttachment() != 0; } public static boolean validatePermissionId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getPermission() != 0; } public static boolean validateStoredProcedureId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getStoredProcedure() != 0; } public static boolean validateTriggerId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getTrigger() != 0; } public static boolean validateUserDefinedFunctionId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getUserDefinedFunction() != 0; } public static boolean validateUserId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getUser() != 0; 
} public static boolean validateClientEncryptionKeyId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getClientEncryptionKey() != 0; } public static boolean isPublicResource(Resource resourceType) { if (resourceType instanceof Database || resourceType instanceof DocumentCollection || resourceType instanceof StoredProcedure || resourceType instanceof UserDefinedFunction || resourceType instanceof Trigger || resourceType instanceof Conflict || resourceType instanceof User || resourceType instanceof Permission || resourceType instanceof Document || resourceType instanceof Offer ) { return true; } else { return false; } } }
/**
 * Static utility for translating between Cosmos DB resource identity and REST resource paths.
 *
 * What the methods below do (both directions):
 *  - generatePath / generatePathForNameBased: build the URL path for a resource, either from a
 *    RID-based owner/resource id (parsed via {@code ResourceId}) or from a name-based full name,
 *    for both single-resource requests and feed (listing) requests. PartitionKey + Delete is the
 *    special case that appends the operations/partitionkeydelete segments.
 *  - parsePathSegments / tryParsePathSegments / parseNameSegments / tryParseNameSegments: the
 *    inverse - split a resource URL into a {@code PathInfo} (isFeed, resource path segment,
 *    resource id or full name, name-based flag). A URL is treated as name-based when its second
 *    segment does not parse as a database RID and the first segment is not one of the
 *    media/offers/partitions/databaseaccount(/topology/ridrange) roots.
 *  - validate*Id: check that a RID string parses and addresses the expected resource type
 *    (database, collection, document, conflict, attachment, permission, sproc, trigger, UDF,
 *    user, client encryption key).
 *  - getDatabasePath / getParentByIndex: prefix of a full name up to the Nth '/' segment.
 *  - isNameBased: infers name-based addressing from {@code charAt(3) == '/'} - presumably
 *    matching the fixed-width RID wire format; NOTE(review): confirm against ResourceId before
 *    relying on this elsewhere.
 *  - getResourcePathSegment / getResourcePath: map between path segment strings and
 *    {@code ResourceType}, throwing BadRequestException for unknown inputs.
 *  - validateResourceFullName / getResourcePathArray: verify that a name-based full name has the
 *    expected alternating segment/name layout for its resource type.
 *  - isPublicResource: whitelist of Resource subclasses exposed through the public surface.
 *
 * NOTE(review): unknown-resource-type branches use {@code assert false} followed by
 * IllegalArgumentException/IllegalStateException, so they throw even with assertions disabled.
 */
class PathsHelper { private final static Logger logger = LoggerFactory.getLogger(PathsHelper.class); public static String generatePath(ResourceType resourceType, RxDocumentServiceRequest request, boolean isFeed) { if (request.getIsNameBased()) { return generatePathForNameBased(resourceType, request.getResourceAddress(), isFeed, request.getOperationType()); } else { return generatePath(resourceType, request.getResourceId(), isFeed, request.getOperationType()); } } public static String generatePathForNameBased(Resource resourceType, String resourceOwnerFullName, String resourceName) { if (resourceName == null) return null; if (resourceType instanceof Database) { return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName; } else if (resourceOwnerFullName == null) { return null; } else if (resourceType instanceof DocumentCollection) { return resourceOwnerFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof StoredProcedure) { return resourceOwnerFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof UserDefinedFunction) { return resourceOwnerFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof Trigger) { return resourceOwnerFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof Conflict) { return resourceOwnerFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof User) { return resourceOwnerFullName + "/" + Paths.USERS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof Permission) { return resourceOwnerFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof Document) { return resourceOwnerFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof Offer) { return Paths.OFFERS_PATH_SEGMENT + "/" + 
resourceName; } else if (resourceType instanceof Resource) { return null; } String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString()); assert false : errorMessage; throw new IllegalArgumentException(errorMessage); } private static String generatePathForNameBased(ResourceType resourceType, String resourceFullName, boolean isFeed, OperationType operationType) { if (isFeed && Strings.isNullOrEmpty(resourceFullName) && resourceType != ResourceType.Database) { String errorMessage = String.format(RMResources.UnexpectedResourceType, resourceType); throw new IllegalArgumentException(errorMessage); } String resourcePath = null; if (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) { resourcePath = resourceFullName + "/" + Paths.OPERATIONS_PATH_SEGMENT + "/" + Paths.PARTITION_KEY_DELETE_PATH_SEGMENT; } else if (!isFeed) { resourcePath = resourceFullName; } else if (resourceType == ResourceType.Database) { return Paths.DATABASES_PATH_SEGMENT; } else if (resourceType == ResourceType.DocumentCollection) { resourcePath = resourceFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.StoredProcedure) { resourcePath = resourceFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT; } else if (resourceType == ResourceType.UserDefinedFunction) { resourcePath = resourceFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.Trigger) { resourcePath = resourceFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT; } else if (resourceType == ResourceType.Conflict) { resourcePath = resourceFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT; } else if (resourceType == ResourceType.Attachment) { resourcePath = resourceFullName + "/" + Paths.ATTACHMENTS_PATH_SEGMENT; } else if (resourceType == ResourceType.User) { resourcePath = resourceFullName + "/" + Paths.USERS_PATH_SEGMENT; } else if (resourceType == ResourceType.Permission) { resourcePath = 
resourceFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.Document) { resourcePath = resourceFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT; } else if (resourceType == ResourceType.Offer) { return resourceFullName + "/" + Paths.OFFERS_PATH_SEGMENT; } else if (resourceType == ResourceType.PartitionKeyRange) { return resourceFullName + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT; } else if (resourceType == ResourceType.Schema) { resourcePath = resourceFullName + "/" + Paths.SCHEMAS_PATH_SEGMENT; } else if (resourceType == ResourceType.ClientEncryptionKey) { resourcePath = resourceFullName + "/" + Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT; } else { String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString()); assert false : errorMessage; throw new IllegalArgumentException(errorMessage); } return resourcePath; } public static String generatePath(ResourceType resourceType, String ownerOrResourceId, boolean isFeed) { if (resourceType == ResourceType.PartitionKey) { return generatePath(resourceType, ownerOrResourceId, isFeed, OperationType.Delete); } else { return generatePath(resourceType, ownerOrResourceId, isFeed, null); } } private static String generatePath(ResourceType resourceType, String ownerOrResourceId, boolean isFeed, OperationType operationType) { if (isFeed && (ownerOrResourceId == null || ownerOrResourceId.isEmpty()) && resourceType != ResourceType.Database && resourceType != ResourceType.Offer && resourceType != ResourceType.MasterPartition && resourceType != ResourceType.ServerPartition && resourceType != ResourceType.DatabaseAccount && resourceType != ResourceType.Topology) { throw new IllegalStateException("INVALID resource type"); } if(ownerOrResourceId == null) { ownerOrResourceId = StringUtils.EMPTY; } if (isFeed && resourceType == ResourceType.Database) { return Paths.DATABASES_PATH_SEGMENT; } else if (resourceType == ResourceType.Database) { return Paths.DATABASES_PATH_SEGMENT 
+ "/" + ownerOrResourceId; } else if (isFeed && resourceType == ResourceType.DocumentCollection) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.DocumentCollection) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString(); } else if (isFeed && resourceType == ResourceType.Offer) { return Paths.OFFERS_PATH_SEGMENT; } else if (resourceType == ResourceType.Offer) { return Paths.OFFERS_PATH_SEGMENT + "/" + ownerOrResourceId; } else if (isFeed && resourceType == ResourceType.StoredProcedure) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT; } else if (resourceType == ResourceType.StoredProcedure) { ResourceId storedProcedureId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + storedProcedureId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + storedProcedureId.getDocumentCollectionId().toString() + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + storedProcedureId.getStoredProcedureId().toString(); } else if (isFeed && resourceType == ResourceType.UserDefinedFunction) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + 
Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.UserDefinedFunction) { ResourceId functionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + functionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + functionId.getDocumentCollectionId().toString() + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + functionId.getUserDefinedFunctionId().toString(); } else if (isFeed && resourceType == ResourceType.Trigger) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.TRIGGERS_PATH_SEGMENT; } else if (resourceType == ResourceType.Trigger) { ResourceId triggerId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + triggerId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + triggerId.getDocumentCollectionId().toString() + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + triggerId.getTriggerId().toString(); } else if (isFeed && resourceType == ResourceType.Conflict) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.CONFLICTS_PATH_SEGMENT; } else if (resourceType == ResourceType.Conflict) { ResourceId conflictId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + conflictId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + conflictId.getDocumentCollectionId().toString() + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + conflictId.getConflictId().toString(); } else if (isFeed && resourceType == ResourceType.PartitionKeyRange) { 
ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT; } else if (resourceType == ResourceType.PartitionKeyRange) { ResourceId partitionKeyRangeId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + partitionKeyRangeId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + partitionKeyRangeId.getDocumentCollectionId().toString() + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT + "/" + partitionKeyRangeId.getPartitionKeyRangeId().toString(); } else if (isFeed && resourceType == ResourceType.Attachment) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentId().toString() + "/" + Paths.ATTACHMENTS_PATH_SEGMENT; } else if (resourceType == ResourceType.Attachment) { ResourceId attachmentId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + attachmentId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + attachmentId.getDocumentCollectionId().toString() + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + attachmentId.getDocumentId().toString() + "/" + Paths.ATTACHMENTS_PATH_SEGMENT + "/" + attachmentId.getAttachmentId().toString(); } else if (isFeed && resourceType == ResourceType.User) { return Paths.DATABASES_PATH_SEGMENT + "/" + ownerOrResourceId + "/" + Paths.USERS_PATH_SEGMENT; } else if (resourceType == ResourceType.User) { ResourceId userId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + 
userId.getDatabaseId().toString() + "/" + Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString(); } else if (isFeed && resourceType == ResourceType.Permission) { ResourceId userId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + userId.getDatabaseId().toString() + "/" + Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString() + "/" + Paths.PERMISSIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.Permission) { ResourceId permissionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + permissionId.getDatabaseId().toString() + "/" + Paths.USERS_PATH_SEGMENT + "/" + permissionId.getUserId().toString() + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + permissionId.getPermissionId().toString(); } else if (isFeed && resourceType == ResourceType.Document) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.DOCUMENTS_PATH_SEGMENT; } else if (resourceType == ResourceType.Document) { ResourceId documentId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentId.getDocumentCollectionId().toString() + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentId.getDocumentId().toString(); } else if (isFeed && resourceType == ResourceType.MasterPartition) { return Paths.PARTITIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.MasterPartition) { return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId; } else if (isFeed && resourceType == ResourceType.ServerPartition) { return Paths.PARTITIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.ServerPartition) { return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId; } else if 
(isFeed && resourceType == ResourceType.Topology) { return Paths.TOPOLOGY_PATH_SEGMENT; } else if (resourceType == ResourceType.Topology) { return Paths.TOPOLOGY_PATH_SEGMENT + "/" + ownerOrResourceId; } else if (isFeed && resourceType == ResourceType.DatabaseAccount) { return Paths.DATABASE_ACCOUNT_PATH_SEGMENT; } else if (resourceType == ResourceType.DatabaseAccount) { return Paths.DATABASE_ACCOUNT_PATH_SEGMENT + "/" + ownerOrResourceId; } else if (resourceType == ResourceType.ClientEncryptionKey) { ResourceId clientEncryptionKeyId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + clientEncryptionKeyId.getDatabaseId().toString() + "/" + Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT + "/" + clientEncryptionKeyId.getClientEncryptionKeyId().toString(); } else if (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.OPERATIONS_PATH_SEGMENT + "/" + Paths.PARTITION_KEY_DELETE_PATH_SEGMENT; } String errorMessage = "invalid resource type"; throw new IllegalStateException(errorMessage); } public static PathInfo parsePathSegments(String resourceUrl) { String[] segments = StringUtils.strip(resourceUrl, "/").split("/"); if (segments == null || segments.length < 1) { return null; } int uriSegmentsCount = segments.length; String segmentOne = StringUtils.strip(segments[uriSegmentsCount - 1], "/"); String segmentTwo = (uriSegmentsCount >= 2) ? 
StringUtils.strip(segments[uriSegmentsCount - 2], "/") : StringUtils.EMPTY; if (uriSegmentsCount >= 2) { if (Paths.MEDIA_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.OFFERS_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.PARTITIONS_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.DATABASE_ACCOUNT_PATH_SEGMENT.compareTo(segments[0]) != 0) { Pair<Boolean, ResourceId> result = ResourceId.tryParse(segments[1]); if (!result.getLeft() || !result.getRight().isDatabaseId()) { return parseNameSegments(resourceUrl, segments); } } } if ((uriSegmentsCount % 2 != 0) && isResourceType(segmentOne)) { return new PathInfo(true, segmentOne, segmentOne.compareToIgnoreCase(Paths.DATABASES_PATH_SEGMENT) != 0 ? segmentTwo : StringUtils.EMPTY, false); } else if (isResourceType(segmentTwo)) { return new PathInfo(false, segmentTwo, segmentOne, false); } return null; } /** * Method which will return boolean based on whether it is able to parse the * path and name segment from resource url , and fill info in PathInfo object * @param resourceUrl Complete ResourceLink * @param pathInfo Path info object which will hold information * @param clientVersion The Client version * @return */ public static boolean tryParsePathSegments(String resourceUrl, PathInfo pathInfo, String clientVersion) { pathInfo.resourcePath = StringUtils.EMPTY; pathInfo.resourceIdOrFullName = StringUtils.EMPTY; pathInfo.isFeed = false; pathInfo.isNameBased = false; if (StringUtils.isEmpty(resourceUrl)) { return false; } String trimmedStr = StringUtils.strip(resourceUrl, Constants.Properties.PATH_SEPARATOR); String[] segments = StringUtils.split(trimmedStr, Constants.Properties.PATH_SEPARATOR); if (segments == null || segments.length < 1) { return false; } int uriSegmentsCount = segments.length; String segmentOne = segments[uriSegmentsCount - 1]; String segmentTwo = (uriSegmentsCount >= 2) ? 
segments[uriSegmentsCount - 2] : StringUtils.EMPTY; if (uriSegmentsCount >= 2) { if (Paths.MEDIA_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.OFFERS_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.PARTITIONS_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.DATABASE_ACCOUNT_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.TOPOLOGY_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.RID_RANGE_PATH_SEGMENT.compareTo(segments[0]) != 0) { Pair<Boolean, ResourceId> result = ResourceId.tryParse(segments[1]); if (!result.getLeft() || !result.getRight().isDatabaseId()) { pathInfo.isNameBased = true; return tryParseNameSegments(resourceUrl, segments, pathInfo); } } } if ((uriSegmentsCount % 2 != 0) && PathsHelper.isResourceType(segmentOne)) { pathInfo.isFeed = true; pathInfo.resourcePath = segmentOne; if (!segmentOne.equalsIgnoreCase(Paths.DATABASES_PATH_SEGMENT)) { pathInfo.resourceIdOrFullName = segmentTwo; } } else if (PathsHelper.isResourceType(segmentTwo)) { pathInfo.isFeed = false; pathInfo.resourcePath = segmentTwo; pathInfo.resourceIdOrFullName = segmentOne; if (!StringUtils.isEmpty(clientVersion) && pathInfo.resourcePath.equalsIgnoreCase(Paths.MEDIA_PATH_SEGMENT)) { } } else { return false; } return true; } /** * Method which will return boolean based on whether it is able to parse the * name segment from resource url , and fill info in PathInfo object * @param resourceUrl Complete ResourceLink * @param segments * @param pathInfo Path info object which will hold information * @return */ private static boolean tryParseNameSegments(String resourceUrl, String[] segments, PathInfo pathInfo) { pathInfo.isFeed = false; pathInfo.resourceIdOrFullName = ""; pathInfo.resourcePath = ""; if (segments == null || segments.length < 1) { return false; } if (segments.length % 2 == 0) { if (isResourceType(segments[segments.length - 2])) { pathInfo.resourcePath = segments[segments.length - 2]; pathInfo.resourceIdOrFullName = unescapeJavaAndTrim(resourceUrl); return true; } } 
else { if (isResourceType(segments[segments.length - 1])) { pathInfo.isFeed = true; pathInfo.resourcePath = segments[segments.length - 1]; String resourceIdOrFullName = resourceUrl.substring(0, StringUtils.removeEnd(resourceUrl,Paths.ROOT).lastIndexOf(Paths.ROOT)); pathInfo.resourceIdOrFullName = unescapeJavaAndTrim(resourceIdOrFullName); return true; } } return false; } public static PathInfo parseNameSegments(String resourceUrl, String[] segments) { if (segments == null || segments.length < 1) { return null; } if (segments.length % 2 == 0) { if (isResourceType(segments[segments.length - 2])) { return new PathInfo(false, segments[segments.length - 2], unescapeJavaAndTrim(resourceUrl), true); } } else { if (isResourceType(segments[segments.length - 1])) { return new PathInfo(true, segments[segments.length - 1], unescapeJavaAndTrim( resourceUrl.substring(0, StringUtils.removeEnd(resourceUrl, Paths.ROOT).lastIndexOf(Paths.ROOT))), true); } } return null; } public static String unescapeJavaAndTrim(String resourceUrl) { if (resourceUrl == null) { return null; } int startInclusiveIndex = 0; while (startInclusiveIndex < resourceUrl.length() && resourceUrl.charAt(startInclusiveIndex) == Paths.ROOT_CHAR) { startInclusiveIndex++; } if (startInclusiveIndex == resourceUrl.length()) { return ""; } int endExclusiveIndex = resourceUrl.length(); while (endExclusiveIndex > startInclusiveIndex && resourceUrl.charAt(endExclusiveIndex - 1) == Paths.ROOT_CHAR) { endExclusiveIndex--; } for (int startLoopIndex = startInclusiveIndex; startLoopIndex < endExclusiveIndex; startLoopIndex++) { if (resourceUrl.charAt(startLoopIndex)== Paths.ESCAPE_CHAR) { return StringEscapeUtils.unescapeJava(StringUtils.strip(resourceUrl, Paths.ROOT)); } } if (startInclusiveIndex == 0 && endExclusiveIndex == resourceUrl.length()) { return resourceUrl; } return resourceUrl.substring(startInclusiveIndex, endExclusiveIndex); } private static boolean isResourceType(String resourcePathSegment) { if 
(StringUtils.isEmpty(resourcePathSegment)) { return false; } switch (resourcePathSegment.toLowerCase(Locale.ROOT)) { case Paths.ATTACHMENTS_PATH_SEGMENT: case Paths.COLLECTIONS_PATH_SEGMENT: case Paths.DATABASES_PATH_SEGMENT: case Paths.PERMISSIONS_PATH_SEGMENT: case Paths.USERS_PATH_SEGMENT: case Paths.DOCUMENTS_PATH_SEGMENT: case Paths.STORED_PROCEDURES_PATH_SEGMENT: case Paths.TRIGGERS_PATH_SEGMENT: case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT: case Paths.CONFLICTS_PATH_SEGMENT: case Paths.MEDIA_PATH_SEGMENT: case Paths.OFFERS_PATH_SEGMENT: case Paths.PARTITIONS_PATH_SEGMENT: case Paths.DATABASE_ACCOUNT_PATH_SEGMENT: case Paths.TOPOLOGY_PATH_SEGMENT: case Paths.PARTITION_KEY_RANGES_PATH_SEGMENT: case Paths.SCHEMAS_PATH_SEGMENT: case Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT: return true; default: return false; } } public static String generatePathForNameBased(ResourceType resourceType, String resourceOwnerFullName, String resourceName) { switch (resourceType) { case Database: return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName; case DocumentCollection: return resourceOwnerFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + resourceName; case StoredProcedure: return resourceOwnerFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + resourceName; case UserDefinedFunction: return resourceOwnerFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + resourceName; case Trigger: return resourceOwnerFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + resourceName; case Attachment: return resourceOwnerFullName + "/" + Paths.ATTACHMENTS_PATH_SEGMENT + "/" + resourceName; case Conflict: return resourceOwnerFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + resourceName; case Document: return resourceOwnerFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + resourceName; case Offer: return resourceOwnerFullName + "/" + Paths.OFFERS_PATH_SEGMENT + "/" + resourceName; case Permission: return resourceOwnerFullName + "/" + 
Paths.PERMISSIONS_PATH_SEGMENT + "/" + resourceName; case User: return resourceOwnerFullName + "/" + Paths.USERS_PATH_SEGMENT + "/" + resourceName; case PartitionKeyRange: return resourceOwnerFullName + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT + "/" + resourceName; default: return null; } } public static String getDatabasePath(String resourceFullName) { if (resourceFullName != null) { int index = indexOfNth(resourceFullName, '/', 2); if (index > 0) return resourceFullName.substring(0, index); } return resourceFullName; } public static String getParentByIndex(String resourceFullName, int segmentIndex) { int index = indexOfNth(resourceFullName, '/', segmentIndex); if (index > 0) return resourceFullName.substring(0, index); else { index = indexOfNth(resourceFullName, '/', segmentIndex - 1); if (index > 0) return resourceFullName; else return null; } } public static boolean isNameBased(String resourceIdOrFullName) { if (resourceIdOrFullName != null && !resourceIdOrFullName.isEmpty() && resourceIdOrFullName.length() > 4 && resourceIdOrFullName.charAt(3) == '/') { return true; } return false; } private static int indexOfNth(String str, char value, int nthOccurance) { int remaining = nthOccurance; char[] characters = str.toCharArray(); for (int i = 0; i < characters.length; i++) { if (characters[i] == value) { remaining--; if (remaining == 0) { return i; } } } return -1; } public static ResourceType getResourcePathSegment(String resourcePathSegment) throws BadRequestException { if (StringUtils.isEmpty(resourcePathSegment)) { String message = String.format(RMResources.StringArgumentNullOrEmpty, "resourcePathSegment"); throw new BadRequestException(message); } switch (resourcePathSegment) { case Paths.ATTACHMENTS_PATH_SEGMENT: return ResourceType.Attachment; case Paths.COLLECTIONS_PATH_SEGMENT: return ResourceType.DocumentCollection; case Paths.DATABASES_PATH_SEGMENT: return ResourceType.Database; case Paths.PERMISSIONS_PATH_SEGMENT: return ResourceType.Permission; 
case Paths.USERS_PATH_SEGMENT: return ResourceType.User; case Paths.DOCUMENTS_PATH_SEGMENT: return ResourceType.Document; case Paths.STORED_PROCEDURES_PATH_SEGMENT: return ResourceType.StoredProcedure; case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT: return ResourceType.UserDefinedFunction; case Paths.TRIGGERS_PATH_SEGMENT: return ResourceType.Trigger; case Paths.CONFLICTS_PATH_SEGMENT: return ResourceType.Conflict; case Paths.OFFERS_PATH_SEGMENT: return ResourceType.Offer; case Paths.SCHEMAS_PATH_SEGMENT: return ResourceType.Schema; } String errorMessage = String.format(RMResources.UnknownResourceType, resourcePathSegment); throw new BadRequestException(errorMessage); } public static String getResourcePath(ResourceType resourceType) throws BadRequestException { switch (resourceType) { case Database: return Paths.DATABASES_PATH_SEGMENT; case DocumentCollection: return Paths.COLLECTIONS_PATH_SEGMENT; case Document: return Paths.DOCUMENTS_PATH_SEGMENT; case StoredProcedure: return Paths.STORED_PROCEDURES_PATH_SEGMENT; case UserDefinedFunction: return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT; case Trigger: return Paths.TRIGGERS_PATH_SEGMENT; case Conflict: return Paths.CONFLICTS_PATH_SEGMENT; case Attachment: return Paths.ATTACHMENTS_PATH_SEGMENT; case User: return Paths.USERS_PATH_SEGMENT; case Permission: return Paths.PERMISSIONS_PATH_SEGMENT; case Offer: return Paths.OFFERS_PATH_SEGMENT; case MasterPartition: case ServerPartition: return Paths.PARTITIONS_PATH_SEGMENT; case PartitionKeyRange: return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT; case Media: return Paths.MEDIA_ROOT; case Schema: return Paths.SCHEMAS_PATH_SEGMENT; case DatabaseAccount: case Topology: return Paths.ROOT; default: String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString()); throw new BadRequestException(errorMessage); } } public static boolean validateResourceFullName(ResourceType resourceType, String resourceFullName) { String[] segments = 
StringUtils.split(resourceFullName, '/'); String[] resourcePathArray = getResourcePathArray(resourceType); if (resourcePathArray == null) { return false; } if (segments.length != resourcePathArray.length * 2) { return false; } for (int i = 0; i < resourcePathArray.length; i++) { if(resourcePathArray[i].compareTo(segments[2 * i]) != 0) { return false; } } return true; } private static String[] getResourcePathArray(ResourceType resourceType) { List<String> segments = new ArrayList<String>(); segments.add(Paths.DATABASES_PATH_SEGMENT); if (resourceType == ResourceType.Permission || resourceType == ResourceType.User) { segments.add(Paths.USERS_PATH_SEGMENT); if (resourceType == ResourceType.Permission) { segments.add(Paths.PERMISSIONS_PATH_SEGMENT); } } else if (resourceType == ResourceType.ClientEncryptionKey) { segments.add(Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT); } else if (resourceType == ResourceType.DocumentCollection || resourceType == ResourceType.StoredProcedure || resourceType == ResourceType.UserDefinedFunction || resourceType == ResourceType.Trigger || resourceType == ResourceType.Conflict || resourceType == ResourceType.Attachment || resourceType == ResourceType.Document || resourceType == ResourceType.PartitionKeyRange || resourceType == ResourceType.Schema) { segments.add(Paths.COLLECTIONS_PATH_SEGMENT); if (resourceType == ResourceType.StoredProcedure) { segments.add(Paths.STORED_PROCEDURES_PATH_SEGMENT); } else if(resourceType == ResourceType.UserDefinedFunction) { segments.add(Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); } else if(resourceType == ResourceType.Trigger) { segments.add(Paths.TRIGGERS_PATH_SEGMENT); } else if (resourceType == ResourceType.Conflict) { segments.add(Paths.CONFLICTS_PATH_SEGMENT); } else if (resourceType == ResourceType.Schema) { segments.add(Paths.SCHEMAS_PATH_SEGMENT); } else if(resourceType == ResourceType.Document || resourceType == ResourceType.Attachment) { segments.add(Paths.DOCUMENTS_PATH_SEGMENT); if (resourceType 
== ResourceType.Attachment) { segments.add(Paths.ATTACHMENTS_PATH_SEGMENT); } } else if(resourceType == ResourceType.PartitionKeyRange) { segments.add(Paths.PARTITION_KEY_RANGES_PATH_SEGMENT); } else if (resourceType == ResourceType.PartitionKey) { segments.add(Paths.COLLECTIONS_PATH_SEGMENT); segments.add(Paths.OPERATIONS_PATH_SEGMENT); } } else if (resourceType != ResourceType.Database) { return null; } return segments.stream().toArray(String[]::new); } public static boolean validateResourceId(ResourceType resourceType, String resourceId) { if (resourceType == ResourceType.Conflict) { return PathsHelper.validateConflictId(resourceId); } else if (resourceType == ResourceType.Database) { return PathsHelper.validateDatabaseId(resourceId); } else if (resourceType == ResourceType.DocumentCollection) { return PathsHelper.validateDocumentCollectionId(resourceId); } else if (resourceType == ResourceType.Document) { return PathsHelper.validateDocumentId(resourceId); } else if (resourceType == ResourceType.Permission) { return PathsHelper.validatePermissionId(resourceId); } else if (resourceType == ResourceType.StoredProcedure) { return PathsHelper.validateStoredProcedureId(resourceId); } else if (resourceType == ResourceType.Trigger) { return PathsHelper.validateTriggerId(resourceId); } else if (resourceType == ResourceType.UserDefinedFunction) { return PathsHelper.validateUserDefinedFunctionId(resourceId); } else if (resourceType == ResourceType.User) { return PathsHelper.validateUserId(resourceId); } else if (resourceType == ResourceType.Attachment) { return PathsHelper.validateAttachmentId(resourceId); } else if (resourceType == ResourceType.ClientEncryptionKey) { return PathsHelper.validateClientEncryptionKeyId(resourceId); }else { logger.error(String.format("ValidateResourceId not implemented for Type %s in ResourceRequestHandler", resourceType.toString())); return false; } } public static boolean validateDatabaseId(String resourceIdString) { Pair<Boolean, 
ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getDatabase() != 0; } public static boolean validateDocumentCollectionId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getDocumentCollection() != 0; } public static boolean validateDocumentId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getDocument() != 0; } public static boolean validateConflictId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getConflict() != 0; } public static boolean validateAttachmentId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getAttachment() != 0; } public static boolean validatePermissionId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getPermission() != 0; } public static boolean validateStoredProcedureId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getStoredProcedure() != 0; } public static boolean validateTriggerId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getTrigger() != 0; } public static boolean validateUserDefinedFunctionId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getUserDefinedFunction() != 0; } public static boolean validateUserId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getUser() != 0; 
} public static boolean validateClientEncryptionKeyId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getClientEncryptionKey() != 0; } public static boolean isPublicResource(Resource resourceType) { if (resourceType instanceof Database || resourceType instanceof DocumentCollection || resourceType instanceof StoredProcedure || resourceType instanceof UserDefinedFunction || resourceType instanceof Trigger || resourceType instanceof Conflict || resourceType instanceof User || resourceType instanceof Permission || resourceType instanceof Document || resourceType instanceof Offer ) { return true; } else { return false; } } }
Mostly a mutability concern — avoiding reassignment without introducing extra heap allocations for the `String`. With the if/else form, the allocation count should be the same either way.
public static String getCollectionPath(String resourceFullName) { StringBuilder trimmedResourceFullName = new StringBuilder(); if (resourceFullName != null) { trimmedResourceFullName.append(Utils.trimBeginningAndEndingSlashes(resourceFullName)); int index = indexOfNth(trimmedResourceFullName.toString(), '/', 4); if (index > 0) return trimmedResourceFullName.substring(0, index); } return trimmedResourceFullName.length() == 0 ? resourceFullName : trimmedResourceFullName.toString(); }
StringBuilder trimmedResourceFullName = new StringBuilder();
public static String getCollectionPath(String resourceFullName) { if (resourceFullName != null) { String trimmedResourceFullName = Utils.trimBeginningAndEndingSlashes(resourceFullName); int index = indexOfNth(trimmedResourceFullName.toString(), '/', 4); if (index > 0) { return trimmedResourceFullName.substring(0, index); } else { return trimmedResourceFullName; } } return resourceFullName; }
/**
 * PathsHelper: static helpers that translate between resource ids / resource full names and the
 * REST address paths ("dbs/{db}/colls/{coll}/docs/{doc}", ...) used by the transport layer, plus
 * parsers that recognize feed vs. item links and name-based vs. RID-based addressing.
 * All members are static; the only field is the class logger.
 */
class PathsHelper { private final static Logger logger = LoggerFactory.getLogger(PathsHelper.class); public static String generatePath(ResourceType resourceType, RxDocumentServiceRequest request, boolean isFeed) { if (request.getIsNameBased()) { return generatePathForNameBased(resourceType, request.getResourceAddress(), isFeed, request.getOperationType()); } else { return generatePath(resourceType, request.getResourceId(), isFeed, request.getOperationType()); } } public static String generatePathForNameBased(Resource resourceType, String resourceOwnerFullName, String resourceName) { if (resourceName == null) return null; if (resourceType instanceof Database) { return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName; } else if (resourceOwnerFullName == null) { return null; } else if (resourceType instanceof DocumentCollection) { return resourceOwnerFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof StoredProcedure) { return resourceOwnerFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof UserDefinedFunction) { return resourceOwnerFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof Trigger) { return resourceOwnerFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof Conflict) { return resourceOwnerFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof User) { return resourceOwnerFullName + "/" + Paths.USERS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof Permission) { return resourceOwnerFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof Document) { return resourceOwnerFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + resourceName; } else if (resourceType instanceof Offer) { return Paths.OFFERS_PATH_SEGMENT + "/" +
// (cont.) Offer branch tail; the 'instanceof Resource' catch-all returns null, anything else asserts and throws.
resourceName; } else if (resourceType instanceof Resource) { return null; } String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString()); assert false : errorMessage; throw new IllegalArgumentException(errorMessage); } private static String generatePathForNameBased(ResourceType resourceType, String resourceFullName, boolean isFeed, OperationType operationType) { if (isFeed && Strings.isNullOrEmpty(resourceFullName) && resourceType != ResourceType.Database) { String errorMessage = String.format(RMResources.UnexpectedResourceType, resourceType); throw new IllegalArgumentException(errorMessage); } String resourcePath = null; if (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) { resourcePath = resourceFullName + "/" + Paths.OPERATIONS_PATH_SEGMENT + "/" + Paths.PARTITION_KEY_DELETE_PATH_SEGMENT; } else if (!isFeed) { resourcePath = resourceFullName; } else if (resourceType == ResourceType.Database) { return Paths.DATABASES_PATH_SEGMENT; } else if (resourceType == ResourceType.DocumentCollection) { resourcePath = resourceFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.StoredProcedure) { resourcePath = resourceFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT; } else if (resourceType == ResourceType.UserDefinedFunction) { resourcePath = resourceFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.Trigger) { resourcePath = resourceFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT; } else if (resourceType == ResourceType.Conflict) { resourcePath = resourceFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT; } else if (resourceType == ResourceType.Attachment) { resourcePath = resourceFullName + "/" + Paths.ATTACHMENTS_PATH_SEGMENT; } else if (resourceType == ResourceType.User) { resourcePath = resourceFullName + "/" + Paths.USERS_PATH_SEGMENT; } else if (resourceType == ResourceType.Permission) { resourcePath =
// (cont.) remaining feed-suffix branches; non-feed requests above use the resource full name as-is.
resourceFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.Document) { resourcePath = resourceFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT; } else if (resourceType == ResourceType.Offer) { return resourceFullName + "/" + Paths.OFFERS_PATH_SEGMENT; } else if (resourceType == ResourceType.PartitionKeyRange) { return resourceFullName + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT; } else if (resourceType == ResourceType.Schema) { resourcePath = resourceFullName + "/" + Paths.SCHEMAS_PATH_SEGMENT; } else if (resourceType == ResourceType.ClientEncryptionKey) { resourcePath = resourceFullName + "/" + Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT; } else { String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString()); assert false : errorMessage; throw new IllegalArgumentException(errorMessage); } return resourcePath; } public static String generatePath(ResourceType resourceType, String ownerOrResourceId, boolean isFeed) { if (resourceType == ResourceType.PartitionKey) { return generatePath(resourceType, ownerOrResourceId, isFeed, OperationType.Delete); } else { return generatePath(resourceType, ownerOrResourceId, isFeed, null); } } private static String generatePath(ResourceType resourceType, String ownerOrResourceId, boolean isFeed, OperationType operationType) { if (isFeed && (ownerOrResourceId == null || ownerOrResourceId.isEmpty()) && resourceType != ResourceType.Database && resourceType != ResourceType.Offer && resourceType != ResourceType.MasterPartition && resourceType != ResourceType.ServerPartition && resourceType != ResourceType.DatabaseAccount && resourceType != ResourceType.Topology) { throw new IllegalStateException("INVALID resource type"); } if(ownerOrResourceId == null) { ownerOrResourceId = StringUtils.EMPTY; } if (isFeed && resourceType == ResourceType.Database) { return Paths.DATABASES_PATH_SEGMENT; } else if (resourceType == ResourceType.Database) { return Paths.DATABASES_PATH_SEGMENT
// (cont.) RID-based generatePath: each branch parses the rid and rebuilds the canonical address.
+ "/" + ownerOrResourceId; } else if (isFeed && resourceType == ResourceType.DocumentCollection) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.DocumentCollection) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString(); } else if (isFeed && resourceType == ResourceType.Offer) { return Paths.OFFERS_PATH_SEGMENT; } else if (resourceType == ResourceType.Offer) { return Paths.OFFERS_PATH_SEGMENT + "/" + ownerOrResourceId; } else if (isFeed && resourceType == ResourceType.StoredProcedure) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT; } else if (resourceType == ResourceType.StoredProcedure) { ResourceId storedProcedureId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + storedProcedureId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + storedProcedureId.getDocumentCollectionId().toString() + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + storedProcedureId.getStoredProcedureId().toString(); } else if (isFeed && resourceType == ResourceType.UserDefinedFunction) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" +
// (cont.) UDF feed tail, then UDF / trigger / conflict item branches.
Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.UserDefinedFunction) { ResourceId functionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + functionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + functionId.getDocumentCollectionId().toString() + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + functionId.getUserDefinedFunctionId().toString(); } else if (isFeed && resourceType == ResourceType.Trigger) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.TRIGGERS_PATH_SEGMENT; } else if (resourceType == ResourceType.Trigger) { ResourceId triggerId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + triggerId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + triggerId.getDocumentCollectionId().toString() + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + triggerId.getTriggerId().toString(); } else if (isFeed && resourceType == ResourceType.Conflict) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.CONFLICTS_PATH_SEGMENT; } else if (resourceType == ResourceType.Conflict) { ResourceId conflictId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + conflictId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + conflictId.getDocumentCollectionId().toString() + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + conflictId.getConflictId().toString(); } else if (isFeed && resourceType == ResourceType.PartitionKeyRange) {
// (cont.) partition-key-range, attachment and user branches.
ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT; } else if (resourceType == ResourceType.PartitionKeyRange) { ResourceId partitionKeyRangeId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + partitionKeyRangeId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + partitionKeyRangeId.getDocumentCollectionId().toString() + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT + "/" + partitionKeyRangeId.getPartitionKeyRangeId().toString(); } else if (isFeed && resourceType == ResourceType.Attachment) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentId().toString() + "/" + Paths.ATTACHMENTS_PATH_SEGMENT; } else if (resourceType == ResourceType.Attachment) { ResourceId attachmentId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + attachmentId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + attachmentId.getDocumentCollectionId().toString() + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + attachmentId.getDocumentId().toString() + "/" + Paths.ATTACHMENTS_PATH_SEGMENT + "/" + attachmentId.getAttachmentId().toString(); } else if (isFeed && resourceType == ResourceType.User) { return Paths.DATABASES_PATH_SEGMENT + "/" + ownerOrResourceId + "/" + Paths.USERS_PATH_SEGMENT; } else if (resourceType == ResourceType.User) { ResourceId userId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" +
// (cont.) user / permission / document branches, then the partition segments.
userId.getDatabaseId().toString() + "/" + Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString(); } else if (isFeed && resourceType == ResourceType.Permission) { ResourceId userId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + userId.getDatabaseId().toString() + "/" + Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString() + "/" + Paths.PERMISSIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.Permission) { ResourceId permissionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + permissionId.getDatabaseId().toString() + "/" + Paths.USERS_PATH_SEGMENT + "/" + permissionId.getUserId().toString() + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + permissionId.getPermissionId().toString(); } else if (isFeed && resourceType == ResourceType.Document) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.DOCUMENTS_PATH_SEGMENT; } else if (resourceType == ResourceType.Document) { ResourceId documentId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentId.getDocumentCollectionId().toString() + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentId.getDocumentId().toString(); } else if (isFeed && resourceType == ResourceType.MasterPartition) { return Paths.PARTITIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.MasterPartition) { return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId; } else if (isFeed && resourceType == ResourceType.ServerPartition) { return Paths.PARTITIONS_PATH_SEGMENT; } else if (resourceType == ResourceType.ServerPartition) { return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId; } else if
// (cont.) topology, database-account, client-encryption-key and partition-key-delete branches.
(isFeed && resourceType == ResourceType.Topology) { return Paths.TOPOLOGY_PATH_SEGMENT; } else if (resourceType == ResourceType.Topology) { return Paths.TOPOLOGY_PATH_SEGMENT + "/" + ownerOrResourceId; } else if (isFeed && resourceType == ResourceType.DatabaseAccount) { return Paths.DATABASE_ACCOUNT_PATH_SEGMENT; } else if (resourceType == ResourceType.DatabaseAccount) { return Paths.DATABASE_ACCOUNT_PATH_SEGMENT + "/" + ownerOrResourceId; } else if (resourceType == ResourceType.ClientEncryptionKey) { ResourceId clientEncryptionKeyId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + clientEncryptionKeyId.getDatabaseId().toString() + "/" + Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT + "/" + clientEncryptionKeyId.getClientEncryptionKeyId().toString(); } else if (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) { ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId); return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString() + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString() + "/" + Paths.OPERATIONS_PATH_SEGMENT + "/" + Paths.PARTITION_KEY_DELETE_PATH_SEGMENT; } String errorMessage = "invalid resource type"; throw new IllegalStateException(errorMessage); } public static PathInfo parsePathSegments(String resourceUrl) { String[] segments = StringUtils.strip(resourceUrl, "/").split("/"); if (segments == null || segments.length < 1) { return null; } int uriSegmentsCount = segments.length; String segmentOne = StringUtils.strip(segments[uriSegmentsCount - 1], "/"); String segmentTwo = (uriSegmentsCount >= 2) ?
// (cont.) parsePathSegments: a second segment that is not a database RID means a name-based link.
StringUtils.strip(segments[uriSegmentsCount - 2], "/") : StringUtils.EMPTY; if (uriSegmentsCount >= 2) { if (Paths.MEDIA_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.OFFERS_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.PARTITIONS_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.DATABASE_ACCOUNT_PATH_SEGMENT.compareTo(segments[0]) != 0) { Pair<Boolean, ResourceId> result = ResourceId.tryParse(segments[1]); if (!result.getLeft() || !result.getRight().isDatabaseId()) { return parseNameSegments(resourceUrl, segments); } } } if ((uriSegmentsCount % 2 != 0) && isResourceType(segmentOne)) { return new PathInfo(true, segmentOne, segmentOne.compareToIgnoreCase(Paths.DATABASES_PATH_SEGMENT) != 0 ? segmentTwo : StringUtils.EMPTY, false); } else if (isResourceType(segmentTwo)) { return new PathInfo(false, segmentTwo, segmentOne, false); } return null; } /** * Method which will return boolean based on whether it is able to parse the * path and name segment from resource url , and fill info in PathInfo object * @param resourceUrl Complete ResourceLink * @param pathInfo Path info object which will hold information * @param clientVersion The Client version * @return */ public static boolean tryParsePathSegments(String resourceUrl, PathInfo pathInfo, String clientVersion) { pathInfo.resourcePath = StringUtils.EMPTY; pathInfo.resourceIdOrFullName = StringUtils.EMPTY; pathInfo.isFeed = false; pathInfo.isNameBased = false; if (StringUtils.isEmpty(resourceUrl)) { return false; } String trimmedStr = StringUtils.strip(resourceUrl, Constants.Properties.PATH_SEPARATOR); String[] segments = StringUtils.split(trimmedStr, Constants.Properties.PATH_SEPARATOR); if (segments == null || segments.length < 1) { return false; } int uriSegmentsCount = segments.length; String segmentOne = segments[uriSegmentsCount - 1]; String segmentTwo = (uriSegmentsCount >= 2) ?
// (cont.) tryParsePathSegments: same RID-vs-name probe as parsePathSegments, writing into pathInfo.
segments[uriSegmentsCount - 2] : StringUtils.EMPTY; if (uriSegmentsCount >= 2) { if (Paths.MEDIA_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.OFFERS_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.PARTITIONS_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.DATABASE_ACCOUNT_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.TOPOLOGY_PATH_SEGMENT.compareTo(segments[0]) != 0 && Paths.RID_RANGE_PATH_SEGMENT.compareTo(segments[0]) != 0) { Pair<Boolean, ResourceId> result = ResourceId.tryParse(segments[1]); if (!result.getLeft() || !result.getRight().isDatabaseId()) { pathInfo.isNameBased = true; return tryParseNameSegments(resourceUrl, segments, pathInfo); } } } if ((uriSegmentsCount % 2 != 0) && PathsHelper.isResourceType(segmentOne)) { pathInfo.isFeed = true; pathInfo.resourcePath = segmentOne; if (!segmentOne.equalsIgnoreCase(Paths.DATABASES_PATH_SEGMENT)) { pathInfo.resourceIdOrFullName = segmentTwo; } } else if (PathsHelper.isResourceType(segmentTwo)) { pathInfo.isFeed = false; pathInfo.resourcePath = segmentTwo; pathInfo.resourceIdOrFullName = segmentOne; if (!StringUtils.isEmpty(clientVersion) && pathInfo.resourcePath.equalsIgnoreCase(Paths.MEDIA_PATH_SEGMENT)) {
// NOTE(review): empty branch — media-link special handling appears to have been stripped out; confirm this is intentional.
} } else { return false; } return true; } /** * Method which will return boolean based on whether it is able to parse the * name segment from resource url , and fill info in PathInfo object * @param resourceUrl Complete ResourceLink * @param segments * @param pathInfo Path info object which will hold information * @return */ private static boolean tryParseNameSegments(String resourceUrl, String[] segments, PathInfo pathInfo) { pathInfo.isFeed = false; pathInfo.resourceIdOrFullName = ""; pathInfo.resourcePath = ""; if (segments == null || segments.length < 1) { return false; } if (segments.length % 2 == 0) { if (isResourceType(segments[segments.length - 2])) { pathInfo.resourcePath = segments[segments.length - 2]; pathInfo.resourceIdOrFullName = unescapeJavaAndTrim(resourceUrl); return true; } }
// Odd segment count => feed link (resource path is the last segment); even => item link.
else { if (isResourceType(segments[segments.length - 1])) { pathInfo.isFeed = true; pathInfo.resourcePath = segments[segments.length - 1]; String resourceIdOrFullName = resourceUrl.substring(0, StringUtils.removeEnd(resourceUrl,Paths.ROOT).lastIndexOf(Paths.ROOT)); pathInfo.resourceIdOrFullName = unescapeJavaAndTrim(resourceIdOrFullName); return true; } } return false; } public static PathInfo parseNameSegments(String resourceUrl, String[] segments) { if (segments == null || segments.length < 1) { return null; } if (segments.length % 2 == 0) { if (isResourceType(segments[segments.length - 2])) { return new PathInfo(false, segments[segments.length - 2], unescapeJavaAndTrim(resourceUrl), true); } } else { if (isResourceType(segments[segments.length - 1])) { return new PathInfo(true, segments[segments.length - 1], unescapeJavaAndTrim( resourceUrl.substring(0, StringUtils.removeEnd(resourceUrl, Paths.ROOT).lastIndexOf(Paths.ROOT))), true); } } return null; } public static String unescapeJavaAndTrim(String resourceUrl) { if (resourceUrl == null) { return null; } int startInclusiveIndex = 0; while (startInclusiveIndex < resourceUrl.length() && resourceUrl.charAt(startInclusiveIndex) == Paths.ROOT_CHAR) { startInclusiveIndex++; } if (startInclusiveIndex == resourceUrl.length()) { return ""; } int endExclusiveIndex = resourceUrl.length(); while (endExclusiveIndex > startInclusiveIndex && resourceUrl.charAt(endExclusiveIndex - 1) == Paths.ROOT_CHAR) { endExclusiveIndex--; } for (int startLoopIndex = startInclusiveIndex; startLoopIndex < endExclusiveIndex; startLoopIndex++) { if (resourceUrl.charAt(startLoopIndex)== Paths.ESCAPE_CHAR) { return StringEscapeUtils.unescapeJava(StringUtils.strip(resourceUrl, Paths.ROOT)); } } if (startInclusiveIndex == 0 && endExclusiveIndex == resourceUrl.length()) { return resourceUrl; } return resourceUrl.substring(startInclusiveIndex, endExclusiveIndex); } private static boolean isResourceType(String resourcePathSegment) { if
// (cont.) isResourceType: membership test over the known path-segment keywords.
(StringUtils.isEmpty(resourcePathSegment)) { return false; } switch (resourcePathSegment.toLowerCase(Locale.ROOT)) { case Paths.ATTACHMENTS_PATH_SEGMENT: case Paths.COLLECTIONS_PATH_SEGMENT: case Paths.DATABASES_PATH_SEGMENT: case Paths.PERMISSIONS_PATH_SEGMENT: case Paths.USERS_PATH_SEGMENT: case Paths.DOCUMENTS_PATH_SEGMENT: case Paths.STORED_PROCEDURES_PATH_SEGMENT: case Paths.TRIGGERS_PATH_SEGMENT: case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT: case Paths.CONFLICTS_PATH_SEGMENT: case Paths.MEDIA_PATH_SEGMENT: case Paths.OFFERS_PATH_SEGMENT: case Paths.PARTITIONS_PATH_SEGMENT: case Paths.DATABASE_ACCOUNT_PATH_SEGMENT: case Paths.TOPOLOGY_PATH_SEGMENT: case Paths.PARTITION_KEY_RANGES_PATH_SEGMENT: case Paths.SCHEMAS_PATH_SEGMENT: case Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT: return true; default: return false; } } public static String generatePathForNameBased(ResourceType resourceType, String resourceOwnerFullName, String resourceName) { switch (resourceType) { case Database: return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName; case DocumentCollection: return resourceOwnerFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + resourceName; case StoredProcedure: return resourceOwnerFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + resourceName; case UserDefinedFunction: return resourceOwnerFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + resourceName; case Trigger: return resourceOwnerFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + resourceName; case Attachment: return resourceOwnerFullName + "/" + Paths.ATTACHMENTS_PATH_SEGMENT + "/" + resourceName; case Conflict: return resourceOwnerFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + resourceName; case Document: return resourceOwnerFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + resourceName; case Offer: return resourceOwnerFullName + "/" + Paths.OFFERS_PATH_SEGMENT + "/" + resourceName; case Permission: return resourceOwnerFullName + "/" +
// (cont.) switch-based name-path builder tail, then small path utilities (getDatabasePath, getParentByIndex, indexOfNth, ...).
Paths.PERMISSIONS_PATH_SEGMENT + "/" + resourceName; case User: return resourceOwnerFullName + "/" + Paths.USERS_PATH_SEGMENT + "/" + resourceName; case PartitionKeyRange: return resourceOwnerFullName + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT + "/" + resourceName; default: return null; } } public static String getDatabasePath(String resourceFullName) { if (resourceFullName != null) { int index = indexOfNth(resourceFullName, '/', 2); if (index > 0) return resourceFullName.substring(0, index); } return resourceFullName; } public static String getParentByIndex(String resourceFullName, int segmentIndex) { int index = indexOfNth(resourceFullName, '/', segmentIndex); if (index > 0) return resourceFullName.substring(0, index); else { index = indexOfNth(resourceFullName, '/', segmentIndex - 1); if (index > 0) return resourceFullName; else return null; } } public static boolean isNameBased(String resourceIdOrFullName) { if (resourceIdOrFullName != null && !resourceIdOrFullName.isEmpty() && resourceIdOrFullName.length() > 4 && resourceIdOrFullName.charAt(3) == '/') { return true; } return false; } private static int indexOfNth(String str, char value, int nthOccurance) { int remaining = nthOccurance; char[] characters = str.toCharArray(); for (int i = 0; i < characters.length; i++) { if (characters[i] == value) { remaining--; if (remaining == 0) { return i; } } } return -1; } public static ResourceType getResourcePathSegment(String resourcePathSegment) throws BadRequestException { if (StringUtils.isEmpty(resourcePathSegment)) { String message = String.format(RMResources.StringArgumentNullOrEmpty, "resourcePathSegment"); throw new BadRequestException(message); } switch (resourcePathSegment) { case Paths.ATTACHMENTS_PATH_SEGMENT: return ResourceType.Attachment; case Paths.COLLECTIONS_PATH_SEGMENT: return ResourceType.DocumentCollection; case Paths.DATABASES_PATH_SEGMENT: return ResourceType.Database; case Paths.PERMISSIONS_PATH_SEGMENT: return ResourceType.Permission;
// (cont.) segment-keyword -> ResourceType mapping; unknown segments raise BadRequestException.
case Paths.USERS_PATH_SEGMENT: return ResourceType.User; case Paths.DOCUMENTS_PATH_SEGMENT: return ResourceType.Document; case Paths.STORED_PROCEDURES_PATH_SEGMENT: return ResourceType.StoredProcedure; case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT: return ResourceType.UserDefinedFunction; case Paths.TRIGGERS_PATH_SEGMENT: return ResourceType.Trigger; case Paths.CONFLICTS_PATH_SEGMENT: return ResourceType.Conflict; case Paths.OFFERS_PATH_SEGMENT: return ResourceType.Offer; case Paths.SCHEMAS_PATH_SEGMENT: return ResourceType.Schema; } String errorMessage = String.format(RMResources.UnknownResourceType, resourcePathSegment); throw new BadRequestException(errorMessage); } public static String getResourcePath(ResourceType resourceType) throws BadRequestException { switch (resourceType) { case Database: return Paths.DATABASES_PATH_SEGMENT; case DocumentCollection: return Paths.COLLECTIONS_PATH_SEGMENT; case Document: return Paths.DOCUMENTS_PATH_SEGMENT; case StoredProcedure: return Paths.STORED_PROCEDURES_PATH_SEGMENT; case UserDefinedFunction: return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT; case Trigger: return Paths.TRIGGERS_PATH_SEGMENT; case Conflict: return Paths.CONFLICTS_PATH_SEGMENT; case Attachment: return Paths.ATTACHMENTS_PATH_SEGMENT; case User: return Paths.USERS_PATH_SEGMENT; case Permission: return Paths.PERMISSIONS_PATH_SEGMENT; case Offer: return Paths.OFFERS_PATH_SEGMENT; case MasterPartition: case ServerPartition: return Paths.PARTITIONS_PATH_SEGMENT; case PartitionKeyRange: return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT; case Media: return Paths.MEDIA_ROOT; case Schema: return Paths.SCHEMAS_PATH_SEGMENT; case DatabaseAccount: case Topology: return Paths.ROOT; default: String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString()); throw new BadRequestException(errorMessage); } } public static boolean validateResourceFullName(ResourceType resourceType, String resourceFullName) { String[] segments =
// (cont.) validateResourceFullName checks segments[0,2,4,...] against the expected keyword sequence.
StringUtils.split(resourceFullName, '/'); String[] resourcePathArray = getResourcePathArray(resourceType); if (resourcePathArray == null) { return false; } if (segments.length != resourcePathArray.length * 2) { return false; } for (int i = 0; i < resourcePathArray.length; i++) { if(resourcePathArray[i].compareTo(segments[2 * i]) != 0) { return false; } } return true; } private static String[] getResourcePathArray(ResourceType resourceType) { List<String> segments = new ArrayList<String>(); segments.add(Paths.DATABASES_PATH_SEGMENT); if (resourceType == ResourceType.Permission || resourceType == ResourceType.User) { segments.add(Paths.USERS_PATH_SEGMENT); if (resourceType == ResourceType.Permission) { segments.add(Paths.PERMISSIONS_PATH_SEGMENT); } } else if (resourceType == ResourceType.ClientEncryptionKey) { segments.add(Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT); } else if (resourceType == ResourceType.DocumentCollection || resourceType == ResourceType.StoredProcedure || resourceType == ResourceType.UserDefinedFunction || resourceType == ResourceType.Trigger || resourceType == ResourceType.Conflict || resourceType == ResourceType.Attachment || resourceType == ResourceType.Document || resourceType == ResourceType.PartitionKeyRange || resourceType == ResourceType.Schema) { segments.add(Paths.COLLECTIONS_PATH_SEGMENT); if (resourceType == ResourceType.StoredProcedure) { segments.add(Paths.STORED_PROCEDURES_PATH_SEGMENT); } else if(resourceType == ResourceType.UserDefinedFunction) { segments.add(Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); } else if(resourceType == ResourceType.Trigger) { segments.add(Paths.TRIGGERS_PATH_SEGMENT); } else if (resourceType == ResourceType.Conflict) { segments.add(Paths.CONFLICTS_PATH_SEGMENT); } else if (resourceType == ResourceType.Schema) { segments.add(Paths.SCHEMAS_PATH_SEGMENT); } else if(resourceType == ResourceType.Document || resourceType == ResourceType.Attachment) { segments.add(Paths.DOCUMENTS_PATH_SEGMENT); if (resourceType
// NOTE(review): the ResourceType.PartitionKey branch below is nested under a guard that does not include PartitionKey — it looks unreachable; confirm before relying on it.
== ResourceType.Attachment) { segments.add(Paths.ATTACHMENTS_PATH_SEGMENT); } } else if(resourceType == ResourceType.PartitionKeyRange) { segments.add(Paths.PARTITION_KEY_RANGES_PATH_SEGMENT); } else if (resourceType == ResourceType.PartitionKey) { segments.add(Paths.COLLECTIONS_PATH_SEGMENT); segments.add(Paths.OPERATIONS_PATH_SEGMENT); } } else if (resourceType != ResourceType.Database) { return null; } return segments.stream().toArray(String[]::new); } public static boolean validateResourceId(ResourceType resourceType, String resourceId) { if (resourceType == ResourceType.Conflict) { return PathsHelper.validateConflictId(resourceId); } else if (resourceType == ResourceType.Database) { return PathsHelper.validateDatabaseId(resourceId); } else if (resourceType == ResourceType.DocumentCollection) { return PathsHelper.validateDocumentCollectionId(resourceId); } else if (resourceType == ResourceType.Document) { return PathsHelper.validateDocumentId(resourceId); } else if (resourceType == ResourceType.Permission) { return PathsHelper.validatePermissionId(resourceId); } else if (resourceType == ResourceType.StoredProcedure) { return PathsHelper.validateStoredProcedureId(resourceId); } else if (resourceType == ResourceType.Trigger) { return PathsHelper.validateTriggerId(resourceId); } else if (resourceType == ResourceType.UserDefinedFunction) { return PathsHelper.validateUserDefinedFunctionId(resourceId); } else if (resourceType == ResourceType.User) { return PathsHelper.validateUserId(resourceId); } else if (resourceType == ResourceType.Attachment) { return PathsHelper.validateAttachmentId(resourceId); } else if (resourceType == ResourceType.ClientEncryptionKey) { return PathsHelper.validateClientEncryptionKeyId(resourceId); }else { logger.error(String.format("ValidateResourceId not implemented for Type %s in ResourceRequestHandler", resourceType.toString())); return false; } } public static boolean validateDatabaseId(String resourceIdString) { Pair<Boolean,
// validateDatabaseId and friends: rid parses AND the type-specific sub-id is non-zero.
ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getDatabase() != 0; } public static boolean validateDocumentCollectionId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getDocumentCollection() != 0; } public static boolean validateDocumentId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getDocument() != 0; } public static boolean validateConflictId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getConflict() != 0; } public static boolean validateAttachmentId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getAttachment() != 0; } public static boolean validatePermissionId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getPermission() != 0; } public static boolean validateStoredProcedureId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getStoredProcedure() != 0; } public static boolean validateTriggerId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getTrigger() != 0; } public static boolean validateUserDefinedFunctionId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getUserDefinedFunction() != 0; } public static boolean validateUserId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getUser() != 0;
// (cont.) tail of validateUserId; the client-encryption-key validator and isPublicResource follow.
} public static boolean validateClientEncryptionKeyId(String resourceIdString) { Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString); return pair.getLeft() && pair.getRight().getClientEncryptionKey() != 0; } public static boolean isPublicResource(Resource resourceType) { if (resourceType instanceof Database || resourceType instanceof DocumentCollection || resourceType instanceof StoredProcedure || resourceType instanceof UserDefinedFunction || resourceType instanceof Trigger || resourceType instanceof Conflict || resourceType instanceof User || resourceType instanceof Permission || resourceType instanceof Document || resourceType instanceof Offer ) { return true; } else { return false; } } }
/**
 * Helper for building and parsing Cosmos DB resource paths.
 *
 * Two addressing schemes are supported throughout:
 * - RID-based: the owner is an encoded {@link ResourceId} string that is parsed to
 *   recover the database / collection / leaf ids.
 * - Name-based: the owner is a full name path such as "dbs/mydb/colls/mycoll".
 *
 * All generated paths are built from the constant segments declared in {@code Paths}.
 */
class PathsHelper {
    private final static Logger logger = LoggerFactory.getLogger(PathsHelper.class);

    /**
     * Generates the request path for {@code request}, dispatching to the name-based or
     * RID-based overload depending on how the request addresses its resource.
     */
    public static String generatePath(ResourceType resourceType, RxDocumentServiceRequest request, boolean isFeed) {
        if (request.getIsNameBased()) {
            return generatePathForNameBased(resourceType, request.getResourceAddress(), isFeed, request.getOperationType());
        } else {
            return generatePath(resourceType, request.getResourceId(), isFeed, request.getOperationType());
        }
    }

    /**
     * Builds the name-based path for a resource instance by appending the type's path
     * segment and {@code resourceName} to the owner's full name.
     *
     * @param resourceType          a resource instance whose concrete class selects the segment
     * @param resourceOwnerFullName full name of the owning resource (ignored for databases/offers)
     * @param resourceName          the resource's name; {@code null} yields {@code null}
     * @return the path, or {@code null} when it cannot be formed
     * @throws IllegalArgumentException for an unrecognized resource class
     */
    public static String generatePathForNameBased(Resource resourceType, String resourceOwnerFullName, String resourceName) {
        if (resourceName == null)
            return null;
        if (resourceType instanceof Database) {
            return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceOwnerFullName == null) {
            // Every non-database type below needs an owner to anchor the path.
            return null;
        } else if (resourceType instanceof DocumentCollection) {
            return resourceOwnerFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof StoredProcedure) {
            return resourceOwnerFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof UserDefinedFunction) {
            return resourceOwnerFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Trigger) {
            return resourceOwnerFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Conflict) {
            return resourceOwnerFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof User) {
            return resourceOwnerFullName + "/" + Paths.USERS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Permission) {
            return resourceOwnerFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Document) {
            return resourceOwnerFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Offer) {
            // Offers are addressed at the account root, not under the owner.
            return Paths.OFFERS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Resource) {
            // Generic Resource instances have no dedicated segment.
            return null;
        }
        String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString());
        assert false : errorMessage;
        throw new IllegalArgumentException(errorMessage);
    }

    /**
     * Builds the name-based path for {@code resourceFullName}. For a feed the type's
     * collection segment is appended; for a single resource the full name is used as-is
     * (except partition-key delete, which appends the operations segment).
     *
     * @throws IllegalArgumentException for a feed with no owner (other than databases),
     *                                  or an unrecognized resource type
     */
    private static String generatePathForNameBased(ResourceType resourceType, String resourceFullName, boolean isFeed, OperationType operationType) {
        if (isFeed && Strings.isNullOrEmpty(resourceFullName) && resourceType != ResourceType.Database) {
            String errorMessage = String.format(RMResources.UnexpectedResourceType, resourceType);
            throw new IllegalArgumentException(errorMessage);
        }
        String resourcePath = null;
        if (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
            resourcePath = resourceFullName + "/" + Paths.OPERATIONS_PATH_SEGMENT + "/" + Paths.PARTITION_KEY_DELETE_PATH_SEGMENT;
        } else if (!isFeed) {
            // Point reads/writes address the resource by its own full name.
            resourcePath = resourceFullName;
        } else if (resourceType == ResourceType.Database) {
            return Paths.DATABASES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.DocumentCollection) {
            resourcePath = resourceFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.StoredProcedure) {
            resourcePath = resourceFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.UserDefinedFunction) {
            resourcePath = resourceFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Trigger) {
            resourcePath = resourceFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Conflict) {
            resourcePath = resourceFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Attachment) {
            resourcePath = resourceFullName + "/" + Paths.ATTACHMENTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.User) {
            resourcePath = resourceFullName + "/" + Paths.USERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Permission) {
            resourcePath = resourceFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Document) {
            resourcePath = resourceFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Offer) {
            return resourceFullName + "/" + Paths.OFFERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.PartitionKeyRange) {
            return resourceFullName + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Schema) {
            resourcePath = resourceFullName + "/" + Paths.SCHEMAS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.ClientEncryptionKey) {
            resourcePath = resourceFullName + "/" + Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT;
        } else {
            String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString());
            assert false : errorMessage;
            throw new IllegalArgumentException(errorMessage);
        }
        return resourcePath;
    }

    /**
     * RID-based path generation. PartitionKey paths are only produced for delete
     * operations, so that operation type is supplied here.
     */
    public static String generatePath(ResourceType resourceType, String ownerOrResourceId, boolean isFeed) {
        if (resourceType == ResourceType.PartitionKey) {
            return generatePath(resourceType, ownerOrResourceId, isFeed, OperationType.Delete);
        } else {
            return generatePath(resourceType, ownerOrResourceId, isFeed, null);
        }
    }

    /**
     * Builds a RID-based path by decoding {@code ownerOrResourceId} into its database /
     * collection / leaf components and joining the corresponding constant segments.
     *
     * @param ownerOrResourceId for feeds: the owner's RID; otherwise the resource's RID.
     *                          Account-rooted types (offers, partitions, topology,
     *                          database account) use the raw string directly.
     * @throws IllegalStateException for a feed with no owner (where one is required)
     *                               or an unsupported resource type
     */
    private static String generatePath(ResourceType resourceType, String ownerOrResourceId, boolean isFeed, OperationType operationType) {
        if (isFeed && (ownerOrResourceId == null || ownerOrResourceId.isEmpty()) &&
            resourceType != ResourceType.Database &&
            resourceType != ResourceType.Offer &&
            resourceType != ResourceType.MasterPartition &&
            resourceType != ResourceType.ServerPartition &&
            resourceType != ResourceType.DatabaseAccount &&
            resourceType != ResourceType.Topology) {
            throw new IllegalStateException("INVALID resource type");
        }
        if (ownerOrResourceId == null) {
            ownerOrResourceId = StringUtils.EMPTY;
        }
        if (isFeed && resourceType == ResourceType.Database) {
            return Paths.DATABASES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Database) {
            return Paths.DATABASES_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (isFeed && resourceType == ResourceType.DocumentCollection) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.DocumentCollection) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString();
        } else if (isFeed && resourceType == ResourceType.Offer) {
            return Paths.OFFERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Offer) {
            return Paths.OFFERS_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (isFeed && resourceType == ResourceType.StoredProcedure) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString()
                + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.StoredProcedure) {
            ResourceId storedProcedureId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + storedProcedureId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + storedProcedureId.getDocumentCollectionId().toString()
                + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + storedProcedureId.getStoredProcedureId().toString();
        } else if (isFeed && resourceType == ResourceType.UserDefinedFunction) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString()
                + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.UserDefinedFunction) {
            ResourceId functionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + functionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + functionId.getDocumentCollectionId().toString()
                + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + functionId.getUserDefinedFunctionId().toString();
        } else if (isFeed && resourceType == ResourceType.Trigger) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString()
                + "/" + Paths.TRIGGERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Trigger) {
            ResourceId triggerId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + triggerId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + triggerId.getDocumentCollectionId().toString()
                + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + triggerId.getTriggerId().toString();
        } else if (isFeed && resourceType == ResourceType.Conflict) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString()
                + "/" + Paths.CONFLICTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Conflict) {
            ResourceId conflictId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + conflictId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + conflictId.getDocumentCollectionId().toString()
                + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + conflictId.getConflictId().toString();
        } else if (isFeed && resourceType == ResourceType.PartitionKeyRange) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString()
                + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.PartitionKeyRange) {
            ResourceId partitionKeyRangeId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + partitionKeyRangeId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + partitionKeyRangeId.getDocumentCollectionId().toString()
                + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT + "/" + partitionKeyRangeId.getPartitionKeyRangeId().toString();
        } else if (isFeed && resourceType == ResourceType.Attachment) {
            // For an attachment feed the owner RID encodes the parent document.
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString()
                + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentId().toString()
                + "/" + Paths.ATTACHMENTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Attachment) {
            ResourceId attachmentId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + attachmentId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + attachmentId.getDocumentCollectionId().toString()
                + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + attachmentId.getDocumentId().toString()
                + "/" + Paths.ATTACHMENTS_PATH_SEGMENT + "/" + attachmentId.getAttachmentId().toString();
        } else if (isFeed && resourceType == ResourceType.User) {
            // NOTE: the user feed uses the raw owner id without ResourceId.parse.
            return Paths.DATABASES_PATH_SEGMENT + "/" + ownerOrResourceId + "/" + Paths.USERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.User) {
            ResourceId userId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + userId.getDatabaseId().toString()
                + "/" + Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString();
        } else if (isFeed && resourceType == ResourceType.Permission) {
            ResourceId userId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + userId.getDatabaseId().toString()
                + "/" + Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString()
                + "/" + Paths.PERMISSIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Permission) {
            ResourceId permissionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + permissionId.getDatabaseId().toString()
                + "/" + Paths.USERS_PATH_SEGMENT + "/" + permissionId.getUserId().toString()
                + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + permissionId.getPermissionId().toString();
        } else if (isFeed && resourceType == ResourceType.Document) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString()
                + "/" + Paths.DOCUMENTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Document) {
            ResourceId documentId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentId.getDocumentCollectionId().toString()
                + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentId.getDocumentId().toString();
        } else if (isFeed && resourceType == ResourceType.MasterPartition) {
            return Paths.PARTITIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.MasterPartition) {
            return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (isFeed && resourceType == ResourceType.ServerPartition) {
            return Paths.PARTITIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.ServerPartition) {
            return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (isFeed && resourceType == ResourceType.Topology) {
            return Paths.TOPOLOGY_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Topology) {
            return Paths.TOPOLOGY_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (isFeed && resourceType == ResourceType.DatabaseAccount) {
            return Paths.DATABASE_ACCOUNT_PATH_SEGMENT;
        } else if (resourceType == ResourceType.DatabaseAccount) {
            return Paths.DATABASE_ACCOUNT_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (resourceType == ResourceType.ClientEncryptionKey) {
            ResourceId clientEncryptionKeyId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + clientEncryptionKeyId.getDatabaseId().toString()
                + "/" + Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT + "/" + clientEncryptionKeyId.getClientEncryptionKeyId().toString();
        } else if (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString()
                + "/" + Paths.OPERATIONS_PATH_SEGMENT + "/" + Paths.PARTITION_KEY_DELETE_PATH_SEGMENT;
        }
        String errorMessage = "invalid resource type";
        throw new IllegalStateException(errorMessage);
    }

    /**
     * Parses {@code resourceUrl} into a {@link PathInfo}, falling back to name-based
     * parsing when the second segment is not a valid database RID.
     *
     * @return the parsed info, or {@code null} when the URL is not a recognizable path
     */
    public static PathInfo parsePathSegments(String resourceUrl) {
        String[] segments = StringUtils.strip(resourceUrl, "/").split("/");
        if (segments == null || segments.length < 1) {
            return null;
        }
        int uriSegmentsCount = segments.length;
        String segmentOne = StringUtils.strip(segments[uriSegmentsCount - 1], "/");
        String segmentTwo = (uriSegmentsCount >= 2) ? StringUtils.strip(segments[uriSegmentsCount - 2], "/") : StringUtils.EMPTY;
        if (uriSegmentsCount >= 2) {
            // Account-rooted collections (media/offers/partitions/account) are never name-based.
            if (Paths.MEDIA_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.OFFERS_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.PARTITIONS_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.DATABASE_ACCOUNT_PATH_SEGMENT.compareTo(segments[0]) != 0) {
                Pair<Boolean, ResourceId> result = ResourceId.tryParse(segments[1]);
                if (!result.getLeft() || !result.getRight().isDatabaseId()) {
                    return parseNameSegments(resourceUrl, segments);
                }
            }
        }
        // An odd segment count ending in a type segment is a feed; an even count is a resource.
        if ((uriSegmentsCount % 2 != 0) && isResourceType(segmentOne)) {
            return new PathInfo(true, segmentOne,
                segmentOne.compareToIgnoreCase(Paths.DATABASES_PATH_SEGMENT) != 0 ? segmentTwo : StringUtils.EMPTY, false);
        } else if (isResourceType(segmentTwo)) {
            return new PathInfo(false, segmentTwo, segmentOne, false);
        }
        return null;
    }

    /**
     * Parses {@code resourceUrl} and fills {@code pathInfo} with the resource path,
     * id-or-full-name, feed flag, and name-based flag.
     *
     * @param resourceUrl   complete resource link
     * @param pathInfo      output object populated on success (reset on entry)
     * @param clientVersion the client version header value; may be empty
     * @return {@code true} when the URL was recognized and {@code pathInfo} was filled
     */
    public static boolean tryParsePathSegments(String resourceUrl, PathInfo pathInfo, String clientVersion) {
        pathInfo.resourcePath = StringUtils.EMPTY;
        pathInfo.resourceIdOrFullName = StringUtils.EMPTY;
        pathInfo.isFeed = false;
        pathInfo.isNameBased = false;
        if (StringUtils.isEmpty(resourceUrl)) {
            return false;
        }
        String trimmedStr = StringUtils.strip(resourceUrl, Constants.Properties.PATH_SEPARATOR);
        String[] segments = StringUtils.split(trimmedStr, Constants.Properties.PATH_SEPARATOR);
        if (segments == null || segments.length < 1) {
            return false;
        }
        int uriSegmentsCount = segments.length;
        String segmentOne = segments[uriSegmentsCount - 1];
        String segmentTwo = (uriSegmentsCount >= 2) ? segments[uriSegmentsCount - 2] : StringUtils.EMPTY;
        if (uriSegmentsCount >= 2) {
            // Same account-rooted exclusions as parsePathSegments, plus topology and rid-range.
            if (Paths.MEDIA_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.OFFERS_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.PARTITIONS_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.DATABASE_ACCOUNT_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.TOPOLOGY_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.RID_RANGE_PATH_SEGMENT.compareTo(segments[0]) != 0) {
                Pair<Boolean, ResourceId> result = ResourceId.tryParse(segments[1]);
                if (!result.getLeft() || !result.getRight().isDatabaseId()) {
                    pathInfo.isNameBased = true;
                    return tryParseNameSegments(resourceUrl, segments, pathInfo);
                }
            }
        }
        if ((uriSegmentsCount % 2 != 0) && PathsHelper.isResourceType(segmentOne)) {
            pathInfo.isFeed = true;
            pathInfo.resourcePath = segmentOne;
            // A database feed has no owner, so the id stays empty in that case.
            if (!segmentOne.equalsIgnoreCase(Paths.DATABASES_PATH_SEGMENT)) {
                pathInfo.resourceIdOrFullName = segmentTwo;
            }
        } else if (PathsHelper.isResourceType(segmentTwo)) {
            pathInfo.isFeed = false;
            pathInfo.resourcePath = segmentTwo;
            pathInfo.resourceIdOrFullName = segmentOne;
            if (!StringUtils.isEmpty(clientVersion)
                && pathInfo.resourcePath.equalsIgnoreCase(Paths.MEDIA_PATH_SEGMENT)) {
                // NOTE(review): intentionally empty — looks like media-id handling was
                // removed or never ported here; confirm before deleting this branch.
            }
        } else {
            return false;
        }
        return true;
    }

    /**
     * Fills {@code pathInfo} from a name-based URL. Even segment counts denote a single
     * resource (".../type/name"); odd counts denote a feed (".../owner/type").
     *
     * @return {@code true} when the trailing segment(s) named a known resource type
     */
    private static boolean tryParseNameSegments(String resourceUrl, String[] segments, PathInfo pathInfo) {
        pathInfo.isFeed = false;
        pathInfo.resourceIdOrFullName = "";
        pathInfo.resourcePath = "";
        if (segments == null || segments.length < 1) {
            return false;
        }
        if (segments.length % 2 == 0) {
            // Even: the URL addresses a single resource by full name.
            if (isResourceType(segments[segments.length - 2])) {
                pathInfo.resourcePath = segments[segments.length - 2];
                pathInfo.resourceIdOrFullName = unescapeJavaAndTrim(resourceUrl);
                return true;
            }
        } else {
            // Odd: the URL addresses a feed; the owner is everything before the last segment.
            if (isResourceType(segments[segments.length - 1])) {
                pathInfo.isFeed = true;
                pathInfo.resourcePath = segments[segments.length - 1];
                String resourceIdOrFullName = resourceUrl.substring(0, StringUtils.removeEnd(resourceUrl, Paths.ROOT).lastIndexOf(Paths.ROOT));
                pathInfo.resourceIdOrFullName = unescapeJavaAndTrim(resourceIdOrFullName);
                return true;
            }
        }
        return false;
    }

    /**
     * Name-based counterpart of {@link #parsePathSegments}: returns a {@link PathInfo}
     * instead of filling an output parameter; {@code null} when unrecognized.
     */
    public static PathInfo parseNameSegments(String resourceUrl, String[] segments) {
        if (segments == null || segments.length < 1) {
            return null;
        }
        if (segments.length % 2 == 0) {
            if (isResourceType(segments[segments.length - 2])) {
                return new PathInfo(false, segments[segments.length - 2], unescapeJavaAndTrim(resourceUrl), true);
            }
        } else {
            if (isResourceType(segments[segments.length - 1])) {
                return new PathInfo(true, segments[segments.length - 1],
                    unescapeJavaAndTrim(resourceUrl.substring(0,
                        StringUtils.removeEnd(resourceUrl, Paths.ROOT).lastIndexOf(Paths.ROOT))), true);
            }
        }
        return null;
    }

    /**
     * Strips leading/trailing path separators from {@code resourceUrl} and, only when an
     * escape character is present, applies Java-string unescaping. Returns {@code null}
     * for {@code null} input and {@code ""} for an all-separator input.
     */
    public static String unescapeJavaAndTrim(String resourceUrl) {
        if (resourceUrl == null) {
            return null;
        }
        int startInclusiveIndex = 0;
        while (startInclusiveIndex < resourceUrl.length() && resourceUrl.charAt(startInclusiveIndex) == Paths.ROOT_CHAR) {
            startInclusiveIndex++;
        }
        if (startInclusiveIndex == resourceUrl.length()) {
            return "";
        }
        int endExclusiveIndex = resourceUrl.length();
        while (endExclusiveIndex > startInclusiveIndex && resourceUrl.charAt(endExclusiveIndex - 1) == Paths.ROOT_CHAR) {
            endExclusiveIndex--;
        }
        // Only pay the unescape cost when an escape character actually occurs.
        for (int startLoopIndex = startInclusiveIndex; startLoopIndex < endExclusiveIndex; startLoopIndex++) {
            if (resourceUrl.charAt(startLoopIndex) == Paths.ESCAPE_CHAR) {
                return StringEscapeUtils.unescapeJava(StringUtils.strip(resourceUrl, Paths.ROOT));
            }
        }
        if (startInclusiveIndex == 0 && endExclusiveIndex == resourceUrl.length()) {
            // Nothing to trim: avoid allocating a substring.
            return resourceUrl;
        }
        return resourceUrl.substring(startInclusiveIndex, endExclusiveIndex);
    }

    /** Returns whether {@code resourcePathSegment} names a known resource-type path segment. */
    private static boolean isResourceType(String resourcePathSegment) {
        if (StringUtils.isEmpty(resourcePathSegment)) {
            return false;
        }
        switch (resourcePathSegment.toLowerCase(Locale.ROOT)) {
            case Paths.ATTACHMENTS_PATH_SEGMENT:
            case Paths.COLLECTIONS_PATH_SEGMENT:
            case Paths.DATABASES_PATH_SEGMENT:
            case Paths.PERMISSIONS_PATH_SEGMENT:
            case Paths.USERS_PATH_SEGMENT:
            case Paths.DOCUMENTS_PATH_SEGMENT:
            case Paths.STORED_PROCEDURES_PATH_SEGMENT:
            case Paths.TRIGGERS_PATH_SEGMENT:
            case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT:
            case Paths.CONFLICTS_PATH_SEGMENT:
            case Paths.MEDIA_PATH_SEGMENT:
            case Paths.OFFERS_PATH_SEGMENT:
            case Paths.PARTITIONS_PATH_SEGMENT:
            case Paths.DATABASE_ACCOUNT_PATH_SEGMENT:
            case Paths.TOPOLOGY_PATH_SEGMENT:
            case Paths.PARTITION_KEY_RANGES_PATH_SEGMENT:
            case Paths.SCHEMAS_PATH_SEGMENT:
            case Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT:
                return true;
            default:
                return false;
        }
    }

    /**
     * Builds the name-based path for {@code resourceName} under {@code resourceOwnerFullName},
     * selected by the {@link ResourceType} enum rather than an instance; {@code null} for
     * types with no name-based addressing.
     */
    public static String generatePathForNameBased(ResourceType resourceType, String resourceOwnerFullName, String resourceName) {
        switch (resourceType) {
            case Database:
                return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName;
            case DocumentCollection:
                return resourceOwnerFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + resourceName;
            case StoredProcedure:
                return resourceOwnerFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + resourceName;
            case UserDefinedFunction:
                return resourceOwnerFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + resourceName;
            case Trigger:
                return resourceOwnerFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + resourceName;
            case Attachment:
                return resourceOwnerFullName + "/" + Paths.ATTACHMENTS_PATH_SEGMENT + "/" + resourceName;
            case Conflict:
                return resourceOwnerFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + resourceName;
            case Document:
                return resourceOwnerFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + resourceName;
            case Offer:
                return resourceOwnerFullName + "/" + Paths.OFFERS_PATH_SEGMENT + "/" + resourceName;
            case Permission:
                return resourceOwnerFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + resourceName;
            case User:
                return resourceOwnerFullName + "/" + Paths.USERS_PATH_SEGMENT + "/" + resourceName;
            case PartitionKeyRange:
                return resourceOwnerFullName + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT + "/" + resourceName;
            default:
                return null;
        }
    }

    /**
     * Returns the database prefix ("dbs/{db}") of a full name, or the input unchanged
     * when it has fewer than two separators (or is {@code null}).
     */
    public static String getDatabasePath(String resourceFullName) {
        if (resourceFullName != null) {
            int index = indexOfNth(resourceFullName, '/', 2);
            if (index > 0)
                return resourceFullName.substring(0, index);
        }
        return resourceFullName;
    }

    /**
     * Returns the prefix of {@code resourceFullName} up to the {@code segmentIndex}-th
     * '/'; the whole name when it ends exactly at that depth; {@code null} when shorter.
     */
    public static String getParentByIndex(String resourceFullName, int segmentIndex) {
        int index = indexOfNth(resourceFullName, '/', segmentIndex);
        if (index > 0)
            return resourceFullName.substring(0, index);
        else {
            index = indexOfNth(resourceFullName, '/', segmentIndex - 1);
            if (index > 0)
                return resourceFullName;
            else
                return null;
        }
    }

    /**
     * Heuristic: a name-based address has a '/' at index 3 (e.g. "dbs/..."), which a
     * base64-style RID never does.
     */
    public static boolean isNameBased(String resourceIdOrFullName) {
        if (resourceIdOrFullName != null && !resourceIdOrFullName.isEmpty()
            && resourceIdOrFullName.length() > 4 && resourceIdOrFullName.charAt(3) == '/') {
            return true;
        }
        return false;
    }

    /** Index of the nth occurrence of {@code value} in {@code str}, or -1 if absent. */
    private static int indexOfNth(String str, char value, int nthOccurance) {
        int remaining = nthOccurance;
        char[] characters = str.toCharArray();
        for (int i = 0; i < characters.length; i++) {
            if (characters[i] == value) {
                remaining--;
                if (remaining == 0) {
                    return i;
                }
            }
        }
        return -1;
    }

    /**
     * Maps a path segment string to its {@link ResourceType}.
     *
     * @throws BadRequestException when the segment is empty or unknown
     */
    public static ResourceType getResourcePathSegment(String resourcePathSegment) throws BadRequestException {
        if (StringUtils.isEmpty(resourcePathSegment)) {
            String message = String.format(RMResources.StringArgumentNullOrEmpty, "resourcePathSegment");
            throw new BadRequestException(message);
        }
        switch (resourcePathSegment) {
            case Paths.ATTACHMENTS_PATH_SEGMENT:
                return ResourceType.Attachment;
            case Paths.COLLECTIONS_PATH_SEGMENT:
                return ResourceType.DocumentCollection;
            case Paths.DATABASES_PATH_SEGMENT:
                return ResourceType.Database;
            case Paths.PERMISSIONS_PATH_SEGMENT:
                return ResourceType.Permission;
            case Paths.USERS_PATH_SEGMENT:
                return ResourceType.User;
            case Paths.DOCUMENTS_PATH_SEGMENT:
                return ResourceType.Document;
            case Paths.STORED_PROCEDURES_PATH_SEGMENT:
                return ResourceType.StoredProcedure;
            case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT:
                return ResourceType.UserDefinedFunction;
            case Paths.TRIGGERS_PATH_SEGMENT:
                return ResourceType.Trigger;
            case Paths.CONFLICTS_PATH_SEGMENT:
                return ResourceType.Conflict;
            case Paths.OFFERS_PATH_SEGMENT:
                return ResourceType.Offer;
            case Paths.SCHEMAS_PATH_SEGMENT:
                return ResourceType.Schema;
        }
        String errorMessage = String.format(RMResources.UnknownResourceType, resourcePathSegment);
        throw new BadRequestException(errorMessage);
    }

    /**
     * Inverse of {@link #getResourcePathSegment}: maps a {@link ResourceType} to its
     * path segment.
     *
     * @throws BadRequestException for types with no path segment
     */
    public static String getResourcePath(ResourceType resourceType) throws BadRequestException {
        switch (resourceType) {
            case Database:
                return Paths.DATABASES_PATH_SEGMENT;
            case DocumentCollection:
                return Paths.COLLECTIONS_PATH_SEGMENT;
            case Document:
                return Paths.DOCUMENTS_PATH_SEGMENT;
            case StoredProcedure:
                return Paths.STORED_PROCEDURES_PATH_SEGMENT;
            case UserDefinedFunction:
                return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
            case Trigger:
                return Paths.TRIGGERS_PATH_SEGMENT;
            case Conflict:
                return Paths.CONFLICTS_PATH_SEGMENT;
            case Attachment:
                return Paths.ATTACHMENTS_PATH_SEGMENT;
            case User:
                return Paths.USERS_PATH_SEGMENT;
            case Permission:
                return Paths.PERMISSIONS_PATH_SEGMENT;
            case Offer:
                return Paths.OFFERS_PATH_SEGMENT;
            case MasterPartition:
            case ServerPartition:
                return Paths.PARTITIONS_PATH_SEGMENT;
            case PartitionKeyRange:
                return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
            case Media:
                return Paths.MEDIA_ROOT;
            case Schema:
                return Paths.SCHEMAS_PATH_SEGMENT;
            case DatabaseAccount:
            case Topology:
                return Paths.ROOT;
            default:
                String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString());
                throw new BadRequestException(errorMessage);
        }
    }

    /**
     * Validates that {@code resourceFullName} has the exact segment structure expected
     * for {@code resourceType} (alternating type-segment / name pairs).
     */
    public static boolean validateResourceFullName(ResourceType resourceType, String resourceFullName) {
        String[] segments = StringUtils.split(resourceFullName, '/');
        String[] resourcePathArray = getResourcePathArray(resourceType);
        if (resourcePathArray == null) {
            return false;
        }
        if (segments.length != resourcePathArray.length * 2) {
            return false;
        }
        // Every even-indexed segment must be the expected type segment.
        for (int i = 0; i < resourcePathArray.length; i++) {
            if (resourcePathArray[i].compareTo(segments[2 * i]) != 0) {
                return false;
            }
        }
        return true;
    }

    /**
     * Returns the ordered type segments expected in a full name for {@code resourceType},
     * or {@code null} for unsupported types.
     */
    private static String[] getResourcePathArray(ResourceType resourceType) {
        List<String> segments = new ArrayList<String>();
        segments.add(Paths.DATABASES_PATH_SEGMENT);
        if (resourceType == ResourceType.Permission || resourceType == ResourceType.User) {
            segments.add(Paths.USERS_PATH_SEGMENT);
            if (resourceType == ResourceType.Permission) {
                segments.add(Paths.PERMISSIONS_PATH_SEGMENT);
            }
        } else if (resourceType == ResourceType.ClientEncryptionKey) {
            segments.add(Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
        } else if (resourceType == ResourceType.DocumentCollection
            || resourceType == ResourceType.StoredProcedure
            || resourceType == ResourceType.UserDefinedFunction
            || resourceType == ResourceType.Trigger
            || resourceType == ResourceType.Conflict
            || resourceType == ResourceType.Attachment
            || resourceType == ResourceType.Document
            || resourceType == ResourceType.PartitionKeyRange
            || resourceType == ResourceType.Schema) {
            segments.add(Paths.COLLECTIONS_PATH_SEGMENT);
            if (resourceType == ResourceType.StoredProcedure) {
                segments.add(Paths.STORED_PROCEDURES_PATH_SEGMENT);
            } else if (resourceType == ResourceType.UserDefinedFunction) {
                segments.add(Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
            } else if (resourceType == ResourceType.Trigger) {
                segments.add(Paths.TRIGGERS_PATH_SEGMENT);
            } else if (resourceType == ResourceType.Conflict) {
                segments.add(Paths.CONFLICTS_PATH_SEGMENT);
            } else if (resourceType == ResourceType.Schema) {
                segments.add(Paths.SCHEMAS_PATH_SEGMENT);
            } else if (resourceType == ResourceType.Document || resourceType == ResourceType.Attachment) {
                segments.add(Paths.DOCUMENTS_PATH_SEGMENT);
                if (resourceType == ResourceType.Attachment) {
                    segments.add(Paths.ATTACHMENTS_PATH_SEGMENT);
                }
            } else if (resourceType == ResourceType.PartitionKeyRange) {
                segments.add(Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
            } else if (resourceType == ResourceType.PartitionKey) {
                // NOTE(review): this branch appears unreachable — PartitionKey is not in
                // the enclosing else-if's type list; confirm against upstream before relying on it.
                segments.add(Paths.COLLECTIONS_PATH_SEGMENT);
                segments.add(Paths.OPERATIONS_PATH_SEGMENT);
            }
        } else if (resourceType != ResourceType.Database) {
            return null;
        }
        return segments.stream().toArray(String[]::new);
    }

    /**
     * Validates that {@code resourceId} decodes to a RID of the kind required by
     * {@code resourceType}; logs and returns {@code false} for unimplemented types.
     */
    public static boolean validateResourceId(ResourceType resourceType, String resourceId) {
        if (resourceType == ResourceType.Conflict) {
            return PathsHelper.validateConflictId(resourceId);
        } else if (resourceType == ResourceType.Database) {
            return PathsHelper.validateDatabaseId(resourceId);
        } else if (resourceType == ResourceType.DocumentCollection) {
            return PathsHelper.validateDocumentCollectionId(resourceId);
        } else if (resourceType == ResourceType.Document) {
            return PathsHelper.validateDocumentId(resourceId);
        } else if (resourceType == ResourceType.Permission) {
            return PathsHelper.validatePermissionId(resourceId);
        } else if (resourceType == ResourceType.StoredProcedure) {
            return PathsHelper.validateStoredProcedureId(resourceId);
        } else if (resourceType == ResourceType.Trigger) {
            return PathsHelper.validateTriggerId(resourceId);
        } else if (resourceType == ResourceType.UserDefinedFunction) {
            return PathsHelper.validateUserDefinedFunctionId(resourceId);
        } else if (resourceType == ResourceType.User) {
            return PathsHelper.validateUserId(resourceId);
        } else if (resourceType == ResourceType.Attachment) {
            return PathsHelper.validateAttachmentId(resourceId);
        } else if (resourceType == ResourceType.ClientEncryptionKey) {
            return PathsHelper.validateClientEncryptionKeyId(resourceId);
        } else {
            logger.error(String.format("ValidateResourceId not implemented for Type %s in ResourceRequestHandler", resourceType.toString()));
            return false;
        }
    }

    /** Returns whether {@code resourceIdString} is a parseable RID with a non-zero database component. */
    public static boolean validateDatabaseId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getDatabase() != 0;
    }

    /** Returns whether {@code resourceIdString} is a parseable RID with a non-zero collection component. */
    public static boolean validateDocumentCollectionId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getDocumentCollection() != 0;
    }

    /** Returns whether {@code resourceIdString} is a parseable RID with a non-zero document component. */
    public static boolean validateDocumentId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getDocument() != 0;
    }

    /** Returns whether {@code resourceIdString} is a parseable RID with a non-zero conflict component. */
    public static boolean validateConflictId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getConflict() != 0;
    }

    /** Returns whether {@code resourceIdString} is a parseable RID with a non-zero attachment component. */
    public static boolean validateAttachmentId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getAttachment() != 0;
    }

    /** Returns whether {@code resourceIdString} is a parseable RID with a non-zero permission component. */
    public static boolean validatePermissionId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getPermission() != 0;
    }

    /** Returns whether {@code resourceIdString} is a parseable RID with a non-zero stored-procedure component. */
    public static boolean validateStoredProcedureId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getStoredProcedure() != 0;
    }

    /** Returns whether {@code resourceIdString} is a parseable RID with a non-zero trigger component. */
    public static boolean validateTriggerId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getTrigger() != 0;
    }

    /** Returns whether {@code resourceIdString} is a parseable RID with a non-zero UDF component. */
    public static boolean validateUserDefinedFunctionId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getUserDefinedFunction() != 0;
    }

    /** Returns whether {@code resourceIdString} is a parseable RID with a non-zero user component. */
    public static boolean validateUserId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getUser() != 0;
    }

    /** Returns whether {@code resourceIdString} is a parseable RID with a non-zero client-encryption-key component. */
    public static boolean validateClientEncryptionKeyId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getClientEncryptionKey() != 0;
    }

    /** Returns whether {@code resourceType} is one of the publicly addressable resource classes. */
    public static boolean isPublicResource(Resource resourceType) {
        if (resourceType instanceof Database
            || resourceType instanceof DocumentCollection
            || resourceType instanceof StoredProcedure
            || resourceType instanceof UserDefinedFunction
            || resourceType instanceof Trigger
            || resourceType instanceof Conflict
            || resourceType instanceof User
            || resourceType instanceof Permission
            || resourceType instanceof Document
            || resourceType instanceof Offer) {
            return true;
        } else {
            return false;
        }
    }
}
Fixed in the next iteration.
/**
 * Returns the collection-level prefix ("dbs/{db}/colls/{coll}") of a name-based
 * resource full name, i.e. everything up to (but excluding) the 4th '/' of the
 * slash-trimmed input. If the input has fewer than four '/'-separated boundaries,
 * the trimmed input itself is returned; a null input is returned unchanged.
 *
 * Fix: the original routed the value through a StringBuilder for no benefit and,
 * when trimming produced an empty string (e.g. an all-slash input), fell back to
 * the untrimmed original instead of the trimmed value. Plain String handling is
 * simpler and consistent.
 *
 * @param resourceFullName name-based resource address, possibly with leading/trailing slashes; may be null
 * @return the collection path prefix, the trimmed input, or null when the input is null
 */
public static String getCollectionPath(String resourceFullName) {
    if (resourceFullName == null) {
        return null;
    }
    String trimmedResourceFullName = Utils.trimBeginningAndEndingSlashes(resourceFullName);
    // The collection prefix ends just before the 4th '/': dbs/{db}/colls/{coll}/...
    int index = indexOfNth(trimmedResourceFullName, '/', 4);
    if (index > 0) {
        return trimmedResourceFullName.substring(0, index);
    }
    return trimmedResourceFullName;
}
// NOTE(review): stray statement — duplicates the local variable from the method above and is
// not attached to any method body here; presumably a leftover from an earlier revision of
// getCollectionPath. TODO confirm intent and remove.
StringBuilder trimmedResourceFullName = new StringBuilder();
/**
 * Extracts the collection-level prefix ("dbs/{db}/colls/{coll}") from a name-based
 * resource full name: the slash-trimmed input is cut just before its 4th '/'.
 * Inputs with fewer segments are returned trimmed; a null input yields null.
 *
 * @param resourceFullName name-based resource address; may be null
 * @return the collection path prefix, the trimmed input, or null
 */
public static String getCollectionPath(String resourceFullName) {
    // Guard clause: nothing to trim or cut for a null address.
    if (resourceFullName == null) {
        return null;
    }
    String trimmed = Utils.trimBeginningAndEndingSlashes(resourceFullName);
    int cutAt = indexOfNth(trimmed, '/', 4);
    // A positive index marks the end of "dbs/{db}/colls/{coll}"; otherwise the
    // whole trimmed address already is (at most) a collection path.
    return cutAt > 0 ? trimmed.substring(0, cutAt) : trimmed;
}
/**
 * Helpers for building and parsing Cosmos DB resource address paths
 * (e.g. "dbs/{db}/colls/{coll}/docs/{doc}"), for both RID-based and
 * name-based addressing, and for validating RIDs and full names.
 */
class PathsHelper {
    private final static Logger logger = LoggerFactory.getLogger(PathsHelper.class);

    // Dispatches to the name-based or RID-based path generator depending on how the
    // request addresses its target resource.
    public static String generatePath(ResourceType resourceType, RxDocumentServiceRequest request, boolean isFeed) {
        if (request.getIsNameBased()) {
            return generatePathForNameBased(resourceType, request.getResourceAddress(), isFeed, request.getOperationType());
        } else {
            return generatePath(resourceType, request.getResourceId(), isFeed, request.getOperationType());
        }
    }

    /**
     * Builds the name-based path for a single resource instance by appending the
     * type's path segment and the resource name to the owner's full name.
     * Returns null when the name is missing, when a non-database resource has no
     * owner, or when the type is an unrecognized {@link Resource} subtype.
     */
    public static String generatePathForNameBased(Resource resourceType, String resourceOwnerFullName, String resourceName) {
        if (resourceName == null)
            return null;
        if (resourceType instanceof Database) {
            return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceOwnerFullName == null) {
            return null;
        } else if (resourceType instanceof DocumentCollection) {
            return resourceOwnerFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof StoredProcedure) {
            return resourceOwnerFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof UserDefinedFunction) {
            return resourceOwnerFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Trigger) {
            return resourceOwnerFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Conflict) {
            return resourceOwnerFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof User) {
            return resourceOwnerFullName + "/" + Paths.USERS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Permission) {
            return resourceOwnerFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Document) {
            return resourceOwnerFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Offer) {
            // Offers are account-level: the owner full name is not part of the path.
            return Paths.OFFERS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Resource) {
            // Unknown concrete Resource subtype: treated as "no path".
            return null;
        }
        String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString());
        // NOTE(review): 'assert false' is a no-op unless the JVM runs with -ea; the
        // IllegalArgumentException below is the effective failure path in production.
        assert false : errorMessage;
        throw new IllegalArgumentException(errorMessage);
    }

    /**
     * Builds the name-based path for either a single resource (isFeed == false,
     * in which case the full name itself is the path) or a feed of children
     * (the type's plural segment is appended to the parent's full name).
     * Partition-key delete is special-cased to an "operations" sub-path.
     */
    private static String generatePathForNameBased(ResourceType resourceType, String resourceFullName, boolean isFeed, OperationType operationType) {
        if (isFeed && Strings.isNullOrEmpty(resourceFullName) && resourceType != ResourceType.Database) {
            String errorMessage = String.format(RMResources.UnexpectedResourceType, resourceType);
            throw new IllegalArgumentException(errorMessage);
        }
        String resourcePath = null;
        if (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
            resourcePath = resourceFullName + "/" + Paths.OPERATIONS_PATH_SEGMENT + "/" + Paths.PARTITION_KEY_DELETE_PATH_SEGMENT;
        } else if (!isFeed) {
            // Point reads/writes address the resource by its own full name.
            resourcePath = resourceFullName;
        } else if (resourceType == ResourceType.Database) {
            return Paths.DATABASES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.DocumentCollection) {
            resourcePath = resourceFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.StoredProcedure) {
            resourcePath = resourceFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.UserDefinedFunction) {
            resourcePath = resourceFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Trigger) {
            resourcePath = resourceFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Conflict) {
            resourcePath = resourceFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Attachment) {
            resourcePath = resourceFullName + "/" + Paths.ATTACHMENTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.User) {
            resourcePath = resourceFullName + "/" + Paths.USERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Permission) {
            resourcePath = resourceFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Document) {
            resourcePath = resourceFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Offer) {
            return resourceFullName + "/" + Paths.OFFERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.PartitionKeyRange) {
            return resourceFullName + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Schema) {
            resourcePath = resourceFullName + "/" + Paths.SCHEMAS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.ClientEncryptionKey) {
            resourcePath = resourceFullName + "/" + Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT;
        } else {
            String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString());
            // NOTE(review): no-op without -ea; the exception below is the real guard.
            assert false : errorMessage;
            throw new IllegalArgumentException(errorMessage);
        }
        return resourcePath;
    }

    // RID-based overload without an operation type; PartitionKey defaults to the
    // Delete operation (the only supported partition-key operation path).
    public static String generatePath(ResourceType resourceType, String ownerOrResourceId, boolean isFeed) {
        if (resourceType == ResourceType.PartitionKey) {
            return generatePath(resourceType, ownerOrResourceId, isFeed, OperationType.Delete);
        } else {
            return generatePath(resourceType, ownerOrResourceId, isFeed, null);
        }
    }

    /**
     * Builds the RID-based path for a resource or feed. The owner/resource RID is
     * decomposed via {@link ResourceId#parse} to recover ancestor ids (database,
     * collection, ...), which are then spliced into the canonical path layout.
     * Account-level types (offers, partitions, topology, database account) use the
     * raw id directly. Throws IllegalStateException for empty feed ids (where an id
     * is required) and for unrecognized types.
     */
    private static String generatePath(ResourceType resourceType, String ownerOrResourceId, boolean isFeed, OperationType operationType) {
        if (isFeed && (ownerOrResourceId == null || ownerOrResourceId.isEmpty())
            && resourceType != ResourceType.Database
            && resourceType != ResourceType.Offer
            && resourceType != ResourceType.MasterPartition
            && resourceType != ResourceType.ServerPartition
            && resourceType != ResourceType.DatabaseAccount
            && resourceType != ResourceType.Topology) {
            throw new IllegalStateException("INVALID resource type");
        }
        if (ownerOrResourceId == null) {
            ownerOrResourceId = StringUtils.EMPTY;
        }
        if (isFeed && resourceType == ResourceType.Database) {
            return Paths.DATABASES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Database) {
            return Paths.DATABASES_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (isFeed && resourceType == ResourceType.DocumentCollection) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.DocumentCollection) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString();
        } else if (isFeed && resourceType == ResourceType.Offer) {
            return Paths.OFFERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Offer) {
            return Paths.OFFERS_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (isFeed && resourceType == ResourceType.StoredProcedure) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString()
                + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.StoredProcedure) {
            ResourceId storedProcedureId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + storedProcedureId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + storedProcedureId.getDocumentCollectionId().toString()
                + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + storedProcedureId.getStoredProcedureId().toString();
        } else if (isFeed && resourceType == ResourceType.UserDefinedFunction) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString()
                + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.UserDefinedFunction) {
            ResourceId functionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + functionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + functionId.getDocumentCollectionId().toString()
                + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + functionId.getUserDefinedFunctionId().toString();
        } else if (isFeed && resourceType == ResourceType.Trigger) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString()
                + "/" + Paths.TRIGGERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Trigger) {
            ResourceId triggerId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + triggerId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + triggerId.getDocumentCollectionId().toString()
                + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + triggerId.getTriggerId().toString();
        } else if (isFeed && resourceType == ResourceType.Conflict) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString()
                + "/" + Paths.CONFLICTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Conflict) {
            ResourceId conflictId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + conflictId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + conflictId.getDocumentCollectionId().toString()
                + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + conflictId.getConflictId().toString();
        } else if (isFeed && resourceType == ResourceType.PartitionKeyRange) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString()
                + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.PartitionKeyRange) {
            ResourceId partitionKeyRangeId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + partitionKeyRangeId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + partitionKeyRangeId.getDocumentCollectionId().toString()
                + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT + "/" + partitionKeyRangeId.getPartitionKeyRangeId().toString();
        } else if (isFeed && resourceType == ResourceType.Attachment) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString()
                + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentId().toString()
                + "/" + Paths.ATTACHMENTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Attachment) {
            ResourceId attachmentId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + attachmentId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + attachmentId.getDocumentCollectionId().toString()
                + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + attachmentId.getDocumentId().toString()
                + "/" + Paths.ATTACHMENTS_PATH_SEGMENT + "/" + attachmentId.getAttachmentId().toString();
        } else if (isFeed && resourceType == ResourceType.User) {
            // User feed uses the owner (database) id verbatim rather than parsing it.
            return Paths.DATABASES_PATH_SEGMENT + "/" + ownerOrResourceId + "/" + Paths.USERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.User) {
            ResourceId userId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + userId.getDatabaseId().toString()
                + "/" + Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString();
        } else if (isFeed && resourceType == ResourceType.Permission) {
            ResourceId userId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + userId.getDatabaseId().toString()
                + "/" + Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString()
                + "/" + Paths.PERMISSIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Permission) {
            ResourceId permissionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + permissionId.getDatabaseId().toString()
                + "/" + Paths.USERS_PATH_SEGMENT + "/" + permissionId.getUserId().toString()
                + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + permissionId.getPermissionId().toString();
        } else if (isFeed && resourceType == ResourceType.Document) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString()
                + "/" + Paths.DOCUMENTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Document) {
            ResourceId documentId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentId.getDocumentCollectionId().toString()
                + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentId.getDocumentId().toString();
        } else if (isFeed && resourceType == ResourceType.MasterPartition) {
            return Paths.PARTITIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.MasterPartition) {
            return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (isFeed && resourceType == ResourceType.ServerPartition) {
            return Paths.PARTITIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.ServerPartition) {
            return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (isFeed && resourceType == ResourceType.Topology) {
            return Paths.TOPOLOGY_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Topology) {
            return Paths.TOPOLOGY_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (isFeed && resourceType == ResourceType.DatabaseAccount) {
            return Paths.DATABASE_ACCOUNT_PATH_SEGMENT;
        } else if (resourceType == ResourceType.DatabaseAccount) {
            return Paths.DATABASE_ACCOUNT_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (resourceType == ResourceType.ClientEncryptionKey) {
            ResourceId clientEncryptionKeyId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + clientEncryptionKeyId.getDatabaseId().toString()
                + "/" + Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT + "/" + clientEncryptionKeyId.getClientEncryptionKeyId().toString();
        } else if (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString()
                + "/" + Paths.OPERATIONS_PATH_SEGMENT + "/" + Paths.PARTITION_KEY_DELETE_PATH_SEGMENT;
        }
        String errorMessage = "invalid resource type";
        throw new IllegalStateException(errorMessage);
    }

    /**
     * Parses a resource URL into a {@link PathInfo}. Name-based URLs are delegated
     * to {@link #parseNameSegments}; RID-based URLs are classified as a feed
     * (odd segment count ending in a type segment) or a single resource.
     * Returns null when the URL cannot be classified.
     */
    public static PathInfo parsePathSegments(String resourceUrl) {
        String[] segments = StringUtils.strip(resourceUrl, "/").split("/");
        if (segments == null || segments.length < 1) {
            return null;
        }
        int uriSegmentsCount = segments.length;
        String segmentOne = StringUtils.strip(segments[uriSegmentsCount - 1], "/");
        String segmentTwo = (uriSegmentsCount >= 2) ? StringUtils.strip(segments[uriSegmentsCount - 2], "/") : StringUtils.EMPTY;
        if (uriSegmentsCount >= 2) {
            // Account-level prefixes (media/offers/partitions/databaseaccount) never carry a
            // database RID in segments[1]; for the rest, a non-RID there means name-based.
            if (Paths.MEDIA_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.OFFERS_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.PARTITIONS_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.DATABASE_ACCOUNT_PATH_SEGMENT.compareTo(segments[0]) != 0) {
                Pair<Boolean, ResourceId> result = ResourceId.tryParse(segments[1]);
                if (!result.getLeft() || !result.getRight().isDatabaseId()) {
                    return parseNameSegments(resourceUrl, segments);
                }
            }
        }
        if ((uriSegmentsCount % 2 != 0) && isResourceType(segmentOne)) {
            // Odd number of segments ending in a type segment => feed; the database feed
            // ("dbs") has no owner id.
            return new PathInfo(true, segmentOne,
                segmentOne.compareToIgnoreCase(Paths.DATABASES_PATH_SEGMENT) != 0 ? segmentTwo : StringUtils.EMPTY, false);
        } else if (isResourceType(segmentTwo)) {
            return new PathInfo(false, segmentTwo, segmentOne, false);
        }
        return null;
    }

    /**
     * Parses a resource URL, filling {@code pathInfo} with the resource path,
     * the resource id (or full name), and feed/name-based flags.
     *
     * @param resourceUrl   the complete resource link
     * @param pathInfo      output object populated on success
     * @param clientVersion the client version (used only for the media special case)
     * @return true when the URL was successfully parsed
     */
    public static boolean tryParsePathSegments(String resourceUrl, PathInfo pathInfo, String clientVersion) {
        pathInfo.resourcePath = StringUtils.EMPTY;
        pathInfo.resourceIdOrFullName = StringUtils.EMPTY;
        pathInfo.isFeed = false;
        pathInfo.isNameBased = false;
        if (StringUtils.isEmpty(resourceUrl)) {
            return false;
        }
        String trimmedStr = StringUtils.strip(resourceUrl, Constants.Properties.PATH_SEPARATOR);
        String[] segments = StringUtils.split(trimmedStr, Constants.Properties.PATH_SEPARATOR);
        if (segments == null || segments.length < 1) {
            return false;
        }
        int uriSegmentsCount = segments.length;
        String segmentOne = segments[uriSegmentsCount - 1];
        String segmentTwo = (uriSegmentsCount >= 2) ? segments[uriSegmentsCount - 2] : StringUtils.EMPTY;
        if (uriSegmentsCount >= 2) {
            // Same name-based detection as parsePathSegments, with two extra account-level
            // prefixes excluded (topology, ridranges).
            if (Paths.MEDIA_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.OFFERS_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.PARTITIONS_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.DATABASE_ACCOUNT_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.TOPOLOGY_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.RID_RANGE_PATH_SEGMENT.compareTo(segments[0]) != 0) {
                Pair<Boolean, ResourceId> result = ResourceId.tryParse(segments[1]);
                if (!result.getLeft() || !result.getRight().isDatabaseId()) {
                    pathInfo.isNameBased = true;
                    return tryParseNameSegments(resourceUrl, segments, pathInfo);
                }
            }
        }
        if ((uriSegmentsCount % 2 != 0) && PathsHelper.isResourceType(segmentOne)) {
            pathInfo.isFeed = true;
            pathInfo.resourcePath = segmentOne;
            // The database feed ("dbs") has no owner id.
            if (!segmentOne.equalsIgnoreCase(Paths.DATABASES_PATH_SEGMENT)) {
                pathInfo.resourceIdOrFullName = segmentTwo;
            }
        } else if (PathsHelper.isResourceType(segmentTwo)) {
            pathInfo.isFeed = false;
            pathInfo.resourcePath = segmentTwo;
            pathInfo.resourceIdOrFullName = segmentOne;
            if (!StringUtils.isEmpty(clientVersion)
                && pathInfo.resourcePath.equalsIgnoreCase(Paths.MEDIA_PATH_SEGMENT)) {
                // NOTE(review): intentionally(?) empty — looks like media-id handling based on
                // clientVersion was removed or never ported. Confirm whether this branch is needed.
            }
        } else {
            return false;
        }
        return true;
    }

    /**
     * Parses a name-based resource URL into {@code pathInfo}. An even segment
     * count denotes a single resource (full name = whole URL); an odd count
     * denotes a feed (full name = URL minus its last segment).
     *
     * @return true when the last (or second-to-last) segment is a known type segment
     */
    private static boolean tryParseNameSegments(String resourceUrl, String[] segments, PathInfo pathInfo) {
        pathInfo.isFeed = false;
        pathInfo.resourceIdOrFullName = "";
        pathInfo.resourcePath = "";
        if (segments == null || segments.length < 1) {
            return false;
        }
        if (segments.length % 2 == 0) {
            // Even count => "…/{typeSegment}/{name}": a single resource.
            if (isResourceType(segments[segments.length - 2])) {
                pathInfo.resourcePath = segments[segments.length - 2];
                pathInfo.resourceIdOrFullName = unescapeJavaAndTrim(resourceUrl);
                return true;
            }
        } else {
            // Odd count => "…/{typeSegment}": a feed; the owner full name is the URL
            // with the trailing type segment removed.
            if (isResourceType(segments[segments.length - 1])) {
                pathInfo.isFeed = true;
                pathInfo.resourcePath = segments[segments.length - 1];
                String resourceIdOrFullName = resourceUrl.substring(0, StringUtils.removeEnd(resourceUrl, Paths.ROOT).lastIndexOf(Paths.ROOT));
                pathInfo.resourceIdOrFullName = unescapeJavaAndTrim(resourceIdOrFullName);
                return true;
            }
        }
        return false;
    }

    // Same classification as tryParseNameSegments, but returning a new PathInfo
    // (or null) instead of filling a caller-supplied one.
    public static PathInfo parseNameSegments(String resourceUrl, String[] segments) {
        if (segments == null || segments.length < 1) {
            return null;
        }
        if (segments.length % 2 == 0) {
            if (isResourceType(segments[segments.length - 2])) {
                return new PathInfo(false, segments[segments.length - 2], unescapeJavaAndTrim(resourceUrl), true);
            }
        } else {
            if (isResourceType(segments[segments.length - 1])) {
                return new PathInfo(true, segments[segments.length - 1],
                    unescapeJavaAndTrim(
                        resourceUrl.substring(0, StringUtils.removeEnd(resourceUrl, Paths.ROOT).lastIndexOf(Paths.ROOT))), true);
            }
        }
        return null;
    }

    /**
     * Strips leading/trailing path separators and, only when an escape character
     * is present in the remaining text, applies Java-string unescaping. Written to
     * avoid allocations on the common no-escape path.
     */
    public static String unescapeJavaAndTrim(String resourceUrl) {
        if (resourceUrl == null) {
            return null;
        }
        int startInclusiveIndex = 0;
        while (startInclusiveIndex < resourceUrl.length() && resourceUrl.charAt(startInclusiveIndex) == Paths.ROOT_CHAR) {
            startInclusiveIndex++;
        }
        if (startInclusiveIndex == resourceUrl.length()) {
            // Input was entirely separators.
            return "";
        }
        int endExclusiveIndex = resourceUrl.length();
        while (endExclusiveIndex > startInclusiveIndex && resourceUrl.charAt(endExclusiveIndex - 1) == Paths.ROOT_CHAR) {
            endExclusiveIndex--;
        }
        for (int startLoopIndex = startInclusiveIndex; startLoopIndex < endExclusiveIndex; startLoopIndex++) {
            if (resourceUrl.charAt(startLoopIndex) == Paths.ESCAPE_CHAR) {
                return StringEscapeUtils.unescapeJava(StringUtils.strip(resourceUrl, Paths.ROOT));
            }
        }
        if (startInclusiveIndex == 0 && endExclusiveIndex == resourceUrl.length()) {
            // Nothing trimmed: return the original instance unchanged.
            return resourceUrl;
        }
        return resourceUrl.substring(startInclusiveIndex, endExclusiveIndex);
    }

    // True when the (case-insensitively compared) segment is one of the known
    // resource-type path segments.
    private static boolean isResourceType(String resourcePathSegment) {
        if (StringUtils.isEmpty(resourcePathSegment)) {
            return false;
        }
        switch (resourcePathSegment.toLowerCase(Locale.ROOT)) {
            case Paths.ATTACHMENTS_PATH_SEGMENT:
            case Paths.COLLECTIONS_PATH_SEGMENT:
            case Paths.DATABASES_PATH_SEGMENT:
            case Paths.PERMISSIONS_PATH_SEGMENT:
            case Paths.USERS_PATH_SEGMENT:
            case Paths.DOCUMENTS_PATH_SEGMENT:
            case Paths.STORED_PROCEDURES_PATH_SEGMENT:
            case Paths.TRIGGERS_PATH_SEGMENT:
            case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT:
            case Paths.CONFLICTS_PATH_SEGMENT:
            case Paths.MEDIA_PATH_SEGMENT:
            case Paths.OFFERS_PATH_SEGMENT:
            case Paths.PARTITIONS_PATH_SEGMENT:
            case Paths.DATABASE_ACCOUNT_PATH_SEGMENT:
            case Paths.TOPOLOGY_PATH_SEGMENT:
            case Paths.PARTITION_KEY_RANGES_PATH_SEGMENT:
            case Paths.SCHEMAS_PATH_SEGMENT:
            case Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT:
                return true;
            default:
                return false;
        }
    }

    /**
     * Switch-based overload of the name-based path builder keyed on
     * {@link ResourceType} rather than the resource instance's class.
     * Returns null for unsupported types instead of throwing.
     */
    public static String generatePathForNameBased(ResourceType resourceType, String resourceOwnerFullName, String resourceName) {
        switch (resourceType) {
            case Database:
                return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName;
            case DocumentCollection:
                return resourceOwnerFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + resourceName;
            case StoredProcedure:
                return resourceOwnerFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + resourceName;
            case UserDefinedFunction:
                return resourceOwnerFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + resourceName;
            case Trigger:
                return resourceOwnerFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + resourceName;
            case Attachment:
                return resourceOwnerFullName + "/" + Paths.ATTACHMENTS_PATH_SEGMENT + "/" + resourceName;
            case Conflict:
                return resourceOwnerFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + resourceName;
            case Document:
                return resourceOwnerFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + resourceName;
            case Offer:
                // NOTE(review): unlike the Resource-typed overload above, this variant prefixes
                // offers with the owner full name — confirm which form callers expect.
                return resourceOwnerFullName + "/" + Paths.OFFERS_PATH_SEGMENT + "/" + resourceName;
            case Permission:
                return resourceOwnerFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + resourceName;
            case User:
                return resourceOwnerFullName + "/" + Paths.USERS_PATH_SEGMENT + "/" + resourceName;
            case PartitionKeyRange:
                return resourceOwnerFullName + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT + "/" + resourceName;
            default:
                return null;
        }
    }

    // Returns the "dbs/{db}" prefix of a full name (text before the 2nd '/'),
    // or the input unchanged when there is no such prefix.
    public static String getDatabasePath(String resourceFullName) {
        if (resourceFullName != null) {
            int index = indexOfNth(resourceFullName, '/', 2);
            if (index > 0)
                return resourceFullName.substring(0, index);
        }
        return resourceFullName;
    }

    // Returns the full name truncated before the segmentIndex-th '/'; the input
    // itself when it has exactly segmentIndex-1 separators; null otherwise.
    public static String getParentByIndex(String resourceFullName, int segmentIndex) {
        int index = indexOfNth(resourceFullName, '/', segmentIndex);
        if (index > 0)
            return resourceFullName.substring(0, index);
        else {
            index = indexOfNth(resourceFullName, '/', segmentIndex - 1);
            if (index > 0)
                return resourceFullName;
            else
                return null;
        }
    }

    // Heuristic: name-based addresses start with a 3-letter type segment followed
    // by '/' (e.g. "dbs/..."), so position 3 holds a separator.
    public static boolean isNameBased(String resourceIdOrFullName) {
        if (resourceIdOrFullName != null && !resourceIdOrFullName.isEmpty() && resourceIdOrFullName.length() > 4 && resourceIdOrFullName.charAt(3) == '/') {
            return true;
        }
        return false;
    }

    // Index of the nth occurrence of 'value' in 'str', or -1 when there are fewer
    // than n occurrences.
    private static int indexOfNth(String str, char value, int nthOccurance) {
        int remaining = nthOccurance;
        char[] characters = str.toCharArray();
        for (int i = 0; i < characters.length; i++) {
            if (characters[i] == value) {
                remaining--;
                if (remaining == 0) {
                    return i;
                }
            }
        }
        return -1;
    }

    /**
     * Maps a path segment (exact, case-sensitive match) to its {@link ResourceType}.
     *
     * @throws BadRequestException when the segment is empty or unrecognized
     */
    public static ResourceType getResourcePathSegment(String resourcePathSegment) throws BadRequestException {
        if (StringUtils.isEmpty(resourcePathSegment)) {
            String message = String.format(RMResources.StringArgumentNullOrEmpty, "resourcePathSegment");
            throw new BadRequestException(message);
        }
        switch (resourcePathSegment) {
            case Paths.ATTACHMENTS_PATH_SEGMENT:
                return ResourceType.Attachment;
            case Paths.COLLECTIONS_PATH_SEGMENT:
                return ResourceType.DocumentCollection;
            case Paths.DATABASES_PATH_SEGMENT:
                return ResourceType.Database;
            case Paths.PERMISSIONS_PATH_SEGMENT:
                return ResourceType.Permission;
            case Paths.USERS_PATH_SEGMENT:
                return ResourceType.User;
            case Paths.DOCUMENTS_PATH_SEGMENT:
                return ResourceType.Document;
            case Paths.STORED_PROCEDURES_PATH_SEGMENT:
                return ResourceType.StoredProcedure;
            case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT:
                return ResourceType.UserDefinedFunction;
            case Paths.TRIGGERS_PATH_SEGMENT:
                return ResourceType.Trigger;
            case Paths.CONFLICTS_PATH_SEGMENT:
                return ResourceType.Conflict;
            case Paths.OFFERS_PATH_SEGMENT:
                return ResourceType.Offer;
            case Paths.SCHEMAS_PATH_SEGMENT:
                return ResourceType.Schema;
        }
        String errorMessage = String.format(RMResources.UnknownResourceType, resourcePathSegment);
        throw new BadRequestException(errorMessage);
    }

    /**
     * Inverse of {@link #getResourcePathSegment}: maps a {@link ResourceType} to
     * its path segment.
     *
     * @throws BadRequestException for unrecognized types
     */
    public static String getResourcePath(ResourceType resourceType) throws BadRequestException {
        switch (resourceType) {
            case Database:
                return Paths.DATABASES_PATH_SEGMENT;
            case DocumentCollection:
                return Paths.COLLECTIONS_PATH_SEGMENT;
            case Document:
                return Paths.DOCUMENTS_PATH_SEGMENT;
            case StoredProcedure:
                return Paths.STORED_PROCEDURES_PATH_SEGMENT;
            case UserDefinedFunction:
                return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
            case Trigger:
                return Paths.TRIGGERS_PATH_SEGMENT;
            case Conflict:
                return Paths.CONFLICTS_PATH_SEGMENT;
            case Attachment:
                return Paths.ATTACHMENTS_PATH_SEGMENT;
            case User:
                return Paths.USERS_PATH_SEGMENT;
            case Permission:
                return Paths.PERMISSIONS_PATH_SEGMENT;
            case Offer:
                return Paths.OFFERS_PATH_SEGMENT;
            case MasterPartition:
            case ServerPartition:
                return Paths.PARTITIONS_PATH_SEGMENT;
            case PartitionKeyRange:
                return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
            case Media:
                return Paths.MEDIA_ROOT;
            case Schema:
                return Paths.SCHEMAS_PATH_SEGMENT;
            case DatabaseAccount:
            case Topology:
                return Paths.ROOT;
            default:
                String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString());
                throw new BadRequestException(errorMessage);
        }
    }

    // A full name is valid when it alternates type-segment/name pairs that match
    // the expected ancestry for the type (e.g. dbs/x/colls/y/docs/z for Document).
    public static boolean validateResourceFullName(ResourceType resourceType, String resourceFullName) {
        String[] segments = StringUtils.split(resourceFullName, '/');
        String[] resourcePathArray = getResourcePathArray(resourceType);
        if (resourcePathArray == null) {
            return false;
        }
        if (segments.length != resourcePathArray.length * 2) {
            return false;
        }
        for (int i = 0; i < resourcePathArray.length; i++) {
            if (resourcePathArray[i].compareTo(segments[2 * i]) != 0) {
                return false;
            }
        }
        return true;
    }

    // Expected type-segment ancestry (e.g. [dbs, colls, docs] for Document);
    // null for types that have no name-based full-name layout.
    private static String[] getResourcePathArray(ResourceType resourceType) {
        List<String> segments = new ArrayList<String>();
        segments.add(Paths.DATABASES_PATH_SEGMENT);
        if (resourceType == ResourceType.Permission
            || resourceType == ResourceType.User) {
            segments.add(Paths.USERS_PATH_SEGMENT);
            if (resourceType == ResourceType.Permission) {
                segments.add(Paths.PERMISSIONS_PATH_SEGMENT);
            }
        } else if (resourceType == ResourceType.ClientEncryptionKey) {
            segments.add(Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
        } else if (resourceType == ResourceType.DocumentCollection
            || resourceType == ResourceType.StoredProcedure
            || resourceType == ResourceType.UserDefinedFunction
            || resourceType == ResourceType.Trigger
            || resourceType == ResourceType.Conflict
            || resourceType == ResourceType.Attachment
            || resourceType == ResourceType.Document
            || resourceType == ResourceType.PartitionKeyRange
            || resourceType == ResourceType.Schema) {
            segments.add(Paths.COLLECTIONS_PATH_SEGMENT);
            if (resourceType == ResourceType.StoredProcedure) {
                segments.add(Paths.STORED_PROCEDURES_PATH_SEGMENT);
            } else if (resourceType == ResourceType.UserDefinedFunction) {
                segments.add(Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
            } else if (resourceType == ResourceType.Trigger) {
                segments.add(Paths.TRIGGERS_PATH_SEGMENT);
            } else if (resourceType == ResourceType.Conflict) {
                segments.add(Paths.CONFLICTS_PATH_SEGMENT);
            } else if (resourceType == ResourceType.Schema) {
                segments.add(Paths.SCHEMAS_PATH_SEGMENT);
            } else if (resourceType == ResourceType.Document
                || resourceType == ResourceType.Attachment) {
                segments.add(Paths.DOCUMENTS_PATH_SEGMENT);
                if (resourceType == ResourceType.Attachment) {
                    segments.add(Paths.ATTACHMENTS_PATH_SEGMENT);
                }
            } else if (resourceType == ResourceType.PartitionKeyRange) {
                segments.add(Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
            } else if (resourceType == ResourceType.PartitionKey) {
                // NOTE(review): unreachable — PartitionKey is not in the enclosing
                // else-if's type list, so this branch can never execute. Confirm intent.
                segments.add(Paths.COLLECTIONS_PATH_SEGMENT);
                segments.add(Paths.OPERATIONS_PATH_SEGMENT);
            }
        } else if (resourceType != ResourceType.Database) {
            return null;
        }
        return segments.stream().toArray(String[]::new);
    }

    // Dispatches to the per-type RID validator; logs and returns false for types
    // that have no validator.
    public static boolean validateResourceId(ResourceType resourceType, String resourceId) {
        if (resourceType == ResourceType.Conflict) {
            return PathsHelper.validateConflictId(resourceId);
        } else if (resourceType == ResourceType.Database) {
            return PathsHelper.validateDatabaseId(resourceId);
        } else if (resourceType == ResourceType.DocumentCollection) {
            return PathsHelper.validateDocumentCollectionId(resourceId);
        } else if (resourceType == ResourceType.Document) {
            return PathsHelper.validateDocumentId(resourceId);
        } else if (resourceType == ResourceType.Permission) {
            return PathsHelper.validatePermissionId(resourceId);
        } else if (resourceType == ResourceType.StoredProcedure) {
            return PathsHelper.validateStoredProcedureId(resourceId);
        } else if (resourceType == ResourceType.Trigger) {
            return PathsHelper.validateTriggerId(resourceId);
        } else if (resourceType == ResourceType.UserDefinedFunction) {
            return PathsHelper.validateUserDefinedFunctionId(resourceId);
        } else if (resourceType == ResourceType.User) {
            return PathsHelper.validateUserId(resourceId);
        } else if (resourceType == ResourceType.Attachment) {
            return PathsHelper.validateAttachmentId(resourceId);
        } else if (resourceType == ResourceType.ClientEncryptionKey) {
            return PathsHelper.validateClientEncryptionKeyId(resourceId);
        } else {
            logger.error(String.format("ValidateResourceId not implemented for Type %s in ResourceRequestHandler", resourceType.toString()));
            return false;
        }
    }

    // True when the string parses as a RID whose database component is non-zero.
    public static boolean validateDatabaseId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getDatabase() != 0;
    }

    // True when the string parses as a RID whose document-collection component is non-zero.
    public static boolean validateDocumentCollectionId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getDocumentCollection() != 0;
    }

    // True when the string parses as a RID whose document component is non-zero.
    public static boolean validateDocumentId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getDocument() != 0;
    }

    // True when the string parses as a RID whose conflict component is non-zero.
    public static boolean validateConflictId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getConflict() != 0;
    }

    // True when the string parses as a RID whose attachment component is non-zero.
    public static boolean validateAttachmentId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getAttachment() != 0;
    }

    // True when the string parses as a RID whose permission component is non-zero.
    public static boolean validatePermissionId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getPermission() != 0;
    }

    // True when the string parses as a RID whose stored-procedure component is non-zero.
    public static boolean validateStoredProcedureId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getStoredProcedure() != 0;
    }

    // True when the string parses as a RID whose trigger component is non-zero.
    public static boolean validateTriggerId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getTrigger() != 0;
    }

    // True when the string parses as a RID whose user-defined-function component is non-zero.
    public static boolean validateUserDefinedFunctionId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getUserDefinedFunction() != 0;
    }

    // True when the string parses as a RID whose user component is non-zero.
    public static boolean validateUserId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getUser() != 0;
    }

    // True when the string parses as a RID whose client-encryption-key component is non-zero.
    public static boolean validateClientEncryptionKeyId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getClientEncryptionKey() != 0;
    }

    // A resource is treated as "public" when it is one of the externally addressable
    // resource kinds listed below.
    public static boolean isPublicResource(Resource resourceType) {
        if (resourceType instanceof Database
            || resourceType instanceof DocumentCollection
            || resourceType instanceof StoredProcedure
            || resourceType instanceof UserDefinedFunction
            || resourceType instanceof Trigger
            || resourceType instanceof Conflict
            || resourceType instanceof User
            || resourceType instanceof Permission
            || resourceType instanceof Document
            || resourceType instanceof Offer
        ) {
            return true;
        } else {
            return false;
        }
    }
}
/**
 * Helper for building and parsing Cosmos DB resource paths (for example
 * {@code dbs/{db}/colls/{coll}/docs/{doc}}) from either name-based resource
 * addresses or RID (resource id) based identifiers.
 *
 * <p>All methods are static; the class holds no state.
 */
class PathsHelper {
    private final static Logger logger = LoggerFactory.getLogger(PathsHelper.class);

    /**
     * Generates the request path for the given resource type, dispatching on whether
     * the request addresses the resource by name or by resource id (RID).
     */
    public static String generatePath(ResourceType resourceType, RxDocumentServiceRequest request, boolean isFeed) {
        if (request.getIsNameBased()) {
            return generatePathForNameBased(resourceType, request.getResourceAddress(), isFeed, request.getOperationType());
        } else {
            return generatePath(resourceType, request.getResourceId(), isFeed, request.getOperationType());
        }
    }

    /**
     * Builds a name-based path for a single resource instance, keyed on the runtime
     * type of {@code resourceType}.
     *
     * @param resourceType          a resource instance whose concrete type selects the path segment
     * @param resourceOwnerFullName full name of the owning resource (ignored for databases/offers)
     * @param resourceName          the resource's own name
     * @return the path, or {@code null} when the name/owner is missing or the type is a plain Resource
     * @throws IllegalArgumentException for an unrecognized resource type
     */
    public static String generatePathForNameBased(Resource resourceType, String resourceOwnerFullName, String resourceName) {
        if (resourceName == null) {
            return null;
        }

        if (resourceType instanceof Database) {
            return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceOwnerFullName == null) {
            return null;
        } else if (resourceType instanceof DocumentCollection) {
            return resourceOwnerFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof StoredProcedure) {
            return resourceOwnerFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof UserDefinedFunction) {
            return resourceOwnerFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Trigger) {
            return resourceOwnerFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Conflict) {
            return resourceOwnerFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof User) {
            return resourceOwnerFullName + "/" + Paths.USERS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Permission) {
            return resourceOwnerFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Document) {
            return resourceOwnerFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Offer) {
            // Offers live at the account root; the owner full name is not part of the path.
            return Paths.OFFERS_PATH_SEGMENT + "/" + resourceName;
        } else if (resourceType instanceof Resource) {
            // Specific resource type is unknown; there is no name-based path to build.
            return null;
        }

        String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString());
        assert false : errorMessage;
        throw new IllegalArgumentException(errorMessage);
    }

    /**
     * Builds a name-based path (single resource or feed) for the given resource type.
     *
     * @throws IllegalArgumentException when a feed is requested without a resource full name
     *                                  (databases excepted) or for an unknown resource type
     */
    private static String generatePathForNameBased(ResourceType resourceType, String resourceFullName, boolean isFeed, OperationType operationType) {
        if (isFeed && Strings.isNullOrEmpty(resourceFullName) && resourceType != ResourceType.Database) {
            String errorMessage = String.format(RMResources.UnexpectedResourceType, resourceType);
            throw new IllegalArgumentException(errorMessage);
        }

        String resourcePath = null;
        if (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
            // Partition-key delete is an "operations" sub-resource of the collection.
            resourcePath = resourceFullName + "/" + Paths.OPERATIONS_PATH_SEGMENT + "/" + Paths.PARTITION_KEY_DELETE_PATH_SEGMENT;
        } else if (!isFeed) {
            resourcePath = resourceFullName;
        } else if (resourceType == ResourceType.Database) {
            return Paths.DATABASES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.DocumentCollection) {
            resourcePath = resourceFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.StoredProcedure) {
            resourcePath = resourceFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.UserDefinedFunction) {
            resourcePath = resourceFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Trigger) {
            resourcePath = resourceFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Conflict) {
            resourcePath = resourceFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Attachment) {
            resourcePath = resourceFullName + "/" + Paths.ATTACHMENTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.User) {
            resourcePath = resourceFullName + "/" + Paths.USERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Permission) {
            resourcePath = resourceFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Document) {
            resourcePath = resourceFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Offer) {
            return resourceFullName + "/" + Paths.OFFERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.PartitionKeyRange) {
            return resourceFullName + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Schema) {
            resourcePath = resourceFullName + "/" + Paths.SCHEMAS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.ClientEncryptionKey) {
            resourcePath = resourceFullName + "/" + Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT;
        } else {
            String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString());
            assert false : errorMessage;
            throw new IllegalArgumentException(errorMessage);
        }

        return resourcePath;
    }

    /**
     * Builds a RID-based path; partition-key resources implicitly use the delete operation
     * (the only supported partition-key operation here).
     */
    public static String generatePath(ResourceType resourceType, String ownerOrResourceId, boolean isFeed) {
        if (resourceType == ResourceType.PartitionKey) {
            return generatePath(resourceType, ownerOrResourceId, isFeed, OperationType.Delete);
        } else {
            return generatePath(resourceType, ownerOrResourceId, isFeed, null);
        }
    }

    /**
     * Builds a RID-based path (single resource or feed). The owner/resource RID is parsed to
     * recover the ancestor ids (database, collection, document, ...) that make up the path.
     *
     * @throws IllegalStateException for a feed request without an id (where one is required)
     *                               or for an unsupported resource type
     */
    private static String generatePath(ResourceType resourceType, String ownerOrResourceId, boolean isFeed, OperationType operationType) {
        if (isFeed && (ownerOrResourceId == null || ownerOrResourceId.isEmpty())
            && resourceType != ResourceType.Database
            && resourceType != ResourceType.Offer
            && resourceType != ResourceType.MasterPartition
            && resourceType != ResourceType.ServerPartition
            && resourceType != ResourceType.DatabaseAccount
            && resourceType != ResourceType.Topology) {
            throw new IllegalStateException("INVALID resource type");
        }

        if (ownerOrResourceId == null) {
            ownerOrResourceId = StringUtils.EMPTY;
        }

        if (isFeed && resourceType == ResourceType.Database) {
            return Paths.DATABASES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Database) {
            return Paths.DATABASES_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (isFeed && resourceType == ResourceType.DocumentCollection) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.DocumentCollection) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString();
        } else if (isFeed && resourceType == ResourceType.Offer) {
            return Paths.OFFERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Offer) {
            return Paths.OFFERS_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (isFeed && resourceType == ResourceType.StoredProcedure) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString()
                + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.StoredProcedure) {
            ResourceId storedProcedureId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + storedProcedureId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + storedProcedureId.getDocumentCollectionId().toString()
                + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + storedProcedureId.getStoredProcedureId().toString();
        } else if (isFeed && resourceType == ResourceType.UserDefinedFunction) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString()
                + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.UserDefinedFunction) {
            ResourceId functionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + functionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + functionId.getDocumentCollectionId().toString()
                + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + functionId.getUserDefinedFunctionId().toString();
        } else if (isFeed && resourceType == ResourceType.Trigger) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString()
                + "/" + Paths.TRIGGERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Trigger) {
            ResourceId triggerId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + triggerId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + triggerId.getDocumentCollectionId().toString()
                + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + triggerId.getTriggerId().toString();
        } else if (isFeed && resourceType == ResourceType.Conflict) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString()
                + "/" + Paths.CONFLICTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Conflict) {
            ResourceId conflictId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + conflictId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + conflictId.getDocumentCollectionId().toString()
                + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + conflictId.getConflictId().toString();
        } else if (isFeed && resourceType == ResourceType.PartitionKeyRange) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString()
                + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
        } else if (resourceType == ResourceType.PartitionKeyRange) {
            ResourceId partitionKeyRangeId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + partitionKeyRangeId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + partitionKeyRangeId.getDocumentCollectionId().toString()
                + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT + "/" + partitionKeyRangeId.getPartitionKeyRangeId().toString();
        } else if (isFeed && resourceType == ResourceType.Attachment) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString()
                + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentId().toString()
                + "/" + Paths.ATTACHMENTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Attachment) {
            ResourceId attachmentId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + attachmentId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + attachmentId.getDocumentCollectionId().toString()
                + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + attachmentId.getDocumentId().toString()
                + "/" + Paths.ATTACHMENTS_PATH_SEGMENT + "/" + attachmentId.getAttachmentId().toString();
        } else if (isFeed && resourceType == ResourceType.User) {
            // For a user feed the owner id is the raw database id, not a parseable RID.
            return Paths.DATABASES_PATH_SEGMENT + "/" + ownerOrResourceId + "/" + Paths.USERS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.User) {
            ResourceId userId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + userId.getDatabaseId().toString()
                + "/" + Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString();
        } else if (isFeed && resourceType == ResourceType.Permission) {
            ResourceId userId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + userId.getDatabaseId().toString()
                + "/" + Paths.USERS_PATH_SEGMENT + "/" + userId.getUserId().toString()
                + "/" + Paths.PERMISSIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Permission) {
            ResourceId permissionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + permissionId.getDatabaseId().toString()
                + "/" + Paths.USERS_PATH_SEGMENT + "/" + permissionId.getUserId().toString()
                + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + permissionId.getPermissionId().toString();
        } else if (isFeed && resourceType == ResourceType.Document) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString()
                + "/" + Paths.DOCUMENTS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Document) {
            ResourceId documentId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentId.getDocumentCollectionId().toString()
                + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + documentId.getDocumentId().toString();
        } else if (isFeed && resourceType == ResourceType.MasterPartition) {
            return Paths.PARTITIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.MasterPartition) {
            return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (isFeed && resourceType == ResourceType.ServerPartition) {
            return Paths.PARTITIONS_PATH_SEGMENT;
        } else if (resourceType == ResourceType.ServerPartition) {
            return Paths.PARTITIONS_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (isFeed && resourceType == ResourceType.Topology) {
            return Paths.TOPOLOGY_PATH_SEGMENT;
        } else if (resourceType == ResourceType.Topology) {
            return Paths.TOPOLOGY_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (isFeed && resourceType == ResourceType.DatabaseAccount) {
            return Paths.DATABASE_ACCOUNT_PATH_SEGMENT;
        } else if (resourceType == ResourceType.DatabaseAccount) {
            return Paths.DATABASE_ACCOUNT_PATH_SEGMENT + "/" + ownerOrResourceId;
        } else if (resourceType == ResourceType.ClientEncryptionKey) {
            ResourceId clientEncryptionKeyId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + clientEncryptionKeyId.getDatabaseId().toString()
                + "/" + Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT + "/" + clientEncryptionKeyId.getClientEncryptionKeyId().toString();
        } else if (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
            ResourceId documentCollectionId = ResourceId.parse(ownerOrResourceId);
            return Paths.DATABASES_PATH_SEGMENT + "/" + documentCollectionId.getDatabaseId().toString()
                + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + documentCollectionId.getDocumentCollectionId().toString()
                + "/" + Paths.OPERATIONS_PATH_SEGMENT + "/" + Paths.PARTITION_KEY_DELETE_PATH_SEGMENT;
        }

        String errorMessage = "invalid resource type";
        throw new IllegalStateException(errorMessage);
    }

    /**
     * Parses a resource URL into a {@link PathInfo}, detecting name-based addressing
     * when the second segment is not a database RID.
     *
     * @return the parsed path info, or {@code null} when the URL does not match any known shape
     */
    public static PathInfo parsePathSegments(String resourceUrl) {
        String[] segments = StringUtils.strip(resourceUrl, "/").split("/");
        if (segments == null || segments.length < 1) {
            return null;
        }

        int uriSegmentsCount = segments.length;
        String segmentOne = StringUtils.strip(segments[uriSegmentsCount - 1], "/");
        String segmentTwo = (uriSegmentsCount >= 2) ? StringUtils.strip(segments[uriSegmentsCount - 2], "/") : StringUtils.EMPTY;

        if (uriSegmentsCount >= 2) {
            // Account-level segments (media, offers, partitions, database account) are never
            // name-based; everything else is name-based when segment[1] is not a database RID.
            if (Paths.MEDIA_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.OFFERS_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.PARTITIONS_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.DATABASE_ACCOUNT_PATH_SEGMENT.compareTo(segments[0]) != 0) {
                Pair<Boolean, ResourceId> result = ResourceId.tryParse(segments[1]);
                if (!result.getLeft() || !result.getRight().isDatabaseId()) {
                    return parseNameSegments(resourceUrl, segments);
                }
            }
        }

        // An odd segment count ending in a resource-type keyword is a feed; an even count
        // ending in id preceded by a keyword is a single resource.
        if ((uriSegmentsCount % 2 != 0) && isResourceType(segmentOne)) {
            return new PathInfo(true, segmentOne,
                segmentOne.compareToIgnoreCase(Paths.DATABASES_PATH_SEGMENT) != 0 ? segmentTwo : StringUtils.EMPTY,
                false);
        } else if (isResourceType(segmentTwo)) {
            return new PathInfo(false, segmentTwo, segmentOne, false);
        }

        return null;
    }

    /**
     * Parses {@code resourceUrl} and fills {@code pathInfo} with the resource path,
     * id/full-name, feed flag, and name-based flag.
     *
     * @param resourceUrl   complete resource link
     * @param pathInfo      out-parameter receiving the parse result
     * @param clientVersion the client version (currently unused; retained for API compatibility)
     * @return {@code true} when the URL could be parsed
     */
    public static boolean tryParsePathSegments(String resourceUrl, PathInfo pathInfo, String clientVersion) {
        pathInfo.resourcePath = StringUtils.EMPTY;
        pathInfo.resourceIdOrFullName = StringUtils.EMPTY;
        pathInfo.isFeed = false;
        pathInfo.isNameBased = false;

        if (StringUtils.isEmpty(resourceUrl)) {
            return false;
        }

        String trimmedStr = StringUtils.strip(resourceUrl, Constants.Properties.PATH_SEPARATOR);
        String[] segments = StringUtils.split(trimmedStr, Constants.Properties.PATH_SEPARATOR);
        if (segments == null || segments.length < 1) {
            return false;
        }

        int uriSegmentsCount = segments.length;
        String segmentOne = segments[uriSegmentsCount - 1];
        String segmentTwo = (uriSegmentsCount >= 2) ? segments[uriSegmentsCount - 2] : StringUtils.EMPTY;

        if (uriSegmentsCount >= 2) {
            if (Paths.MEDIA_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.OFFERS_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.PARTITIONS_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.DATABASE_ACCOUNT_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.TOPOLOGY_PATH_SEGMENT.compareTo(segments[0]) != 0
                && Paths.RID_RANGE_PATH_SEGMENT.compareTo(segments[0]) != 0) {
                Pair<Boolean, ResourceId> result = ResourceId.tryParse(segments[1]);
                if (!result.getLeft() || !result.getRight().isDatabaseId()) {
                    pathInfo.isNameBased = true;
                    return tryParseNameSegments(resourceUrl, segments, pathInfo);
                }
            }
        }

        if ((uriSegmentsCount % 2 != 0) && PathsHelper.isResourceType(segmentOne)) {
            pathInfo.isFeed = true;
            pathInfo.resourcePath = segmentOne;
            if (!segmentOne.equalsIgnoreCase(Paths.DATABASES_PATH_SEGMENT)) {
                pathInfo.resourceIdOrFullName = segmentTwo;
            }
        } else if (PathsHelper.isResourceType(segmentTwo)) {
            pathInfo.isFeed = false;
            pathInfo.resourcePath = segmentTwo;
            pathInfo.resourceIdOrFullName = segmentOne;
            // NOTE(review): the previous implementation contained an empty "if" block here
            // checking clientVersion against the media path segment; it performed no action
            // and has been removed as dead code.
        } else {
            return false;
        }

        return true;
    }

    /**
     * Parses the name-based segments of {@code resourceUrl} into {@code pathInfo}.
     * An even segment count addresses a single resource; an odd count addresses a feed.
     *
     * @return {@code true} when the segments could be parsed
     */
    private static boolean tryParseNameSegments(String resourceUrl, String[] segments, PathInfo pathInfo) {
        pathInfo.isFeed = false;
        pathInfo.resourceIdOrFullName = "";
        pathInfo.resourcePath = "";
        if (segments == null || segments.length < 1) {
            return false;
        }

        if (segments.length % 2 == 0) {
            // Even count: ".../{type}/{name}" — a single resource.
            if (isResourceType(segments[segments.length - 2])) {
                pathInfo.resourcePath = segments[segments.length - 2];
                pathInfo.resourceIdOrFullName = unescapeJavaAndTrim(resourceUrl);
                return true;
            }
        } else {
            // Odd count: ".../{type}" — a feed; the owner is everything before the last segment.
            if (isResourceType(segments[segments.length - 1])) {
                pathInfo.isFeed = true;
                pathInfo.resourcePath = segments[segments.length - 1];
                String resourceIdOrFullName = resourceUrl.substring(0,
                    StringUtils.removeEnd(resourceUrl, Paths.ROOT).lastIndexOf(Paths.ROOT));
                pathInfo.resourceIdOrFullName = unescapeJavaAndTrim(resourceIdOrFullName);
                return true;
            }
        }

        return false;
    }

    /**
     * Name-based counterpart of {@link #parsePathSegments(String)}; returns a new
     * {@link PathInfo} or {@code null} when the segments do not match.
     */
    public static PathInfo parseNameSegments(String resourceUrl, String[] segments) {
        if (segments == null || segments.length < 1) {
            return null;
        }

        if (segments.length % 2 == 0) {
            if (isResourceType(segments[segments.length - 2])) {
                return new PathInfo(false, segments[segments.length - 2], unescapeJavaAndTrim(resourceUrl), true);
            }
        } else {
            if (isResourceType(segments[segments.length - 1])) {
                return new PathInfo(true, segments[segments.length - 1],
                    unescapeJavaAndTrim(
                        resourceUrl.substring(0, StringUtils.removeEnd(resourceUrl, Paths.ROOT).lastIndexOf(Paths.ROOT))),
                    true);
            }
        }

        return null;
    }

    /**
     * Strips leading/trailing path separators and, only when an escape character is
     * present, applies Java string unescaping. Avoids allocation when no trimming is needed.
     */
    public static String unescapeJavaAndTrim(String resourceUrl) {
        if (resourceUrl == null) {
            return null;
        }

        int startInclusiveIndex = 0;
        while (startInclusiveIndex < resourceUrl.length()
            && resourceUrl.charAt(startInclusiveIndex) == Paths.ROOT_CHAR) {
            startInclusiveIndex++;
        }
        if (startInclusiveIndex == resourceUrl.length()) {
            return "";
        }

        int endExclusiveIndex = resourceUrl.length();
        while (endExclusiveIndex > startInclusiveIndex
            && resourceUrl.charAt(endExclusiveIndex - 1) == Paths.ROOT_CHAR) {
            endExclusiveIndex--;
        }

        // Only pay for unescaping when an escape character actually appears.
        for (int startLoopIndex = startInclusiveIndex; startLoopIndex < endExclusiveIndex; startLoopIndex++) {
            if (resourceUrl.charAt(startLoopIndex) == Paths.ESCAPE_CHAR) {
                return StringEscapeUtils.unescapeJava(StringUtils.strip(resourceUrl, Paths.ROOT));
            }
        }

        if (startInclusiveIndex == 0 && endExclusiveIndex == resourceUrl.length()) {
            return resourceUrl;
        }
        return resourceUrl.substring(startInclusiveIndex, endExclusiveIndex);
    }

    /** Returns {@code true} when the segment is a known resource-type path keyword. */
    private static boolean isResourceType(String resourcePathSegment) {
        if (StringUtils.isEmpty(resourcePathSegment)) {
            return false;
        }

        switch (resourcePathSegment.toLowerCase(Locale.ROOT)) {
            case Paths.ATTACHMENTS_PATH_SEGMENT:
            case Paths.COLLECTIONS_PATH_SEGMENT:
            case Paths.DATABASES_PATH_SEGMENT:
            case Paths.PERMISSIONS_PATH_SEGMENT:
            case Paths.USERS_PATH_SEGMENT:
            case Paths.DOCUMENTS_PATH_SEGMENT:
            case Paths.STORED_PROCEDURES_PATH_SEGMENT:
            case Paths.TRIGGERS_PATH_SEGMENT:
            case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT:
            case Paths.CONFLICTS_PATH_SEGMENT:
            case Paths.MEDIA_PATH_SEGMENT:
            case Paths.OFFERS_PATH_SEGMENT:
            case Paths.PARTITIONS_PATH_SEGMENT:
            case Paths.DATABASE_ACCOUNT_PATH_SEGMENT:
            case Paths.TOPOLOGY_PATH_SEGMENT:
            case Paths.PARTITION_KEY_RANGES_PATH_SEGMENT:
            case Paths.SCHEMAS_PATH_SEGMENT:
            case Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT:
                return true;
            default:
                return false;
        }
    }

    /**
     * Builds a name-based path for a single resource, keyed on the {@link ResourceType} enum.
     *
     * @return the path, or {@code null} for types with no name-based addressing
     */
    public static String generatePathForNameBased(ResourceType resourceType, String resourceOwnerFullName, String resourceName) {
        switch (resourceType) {
            case Database:
                return Paths.DATABASES_PATH_SEGMENT + "/" + resourceName;
            case DocumentCollection:
                return resourceOwnerFullName + "/" + Paths.COLLECTIONS_PATH_SEGMENT + "/" + resourceName;
            case StoredProcedure:
                return resourceOwnerFullName + "/" + Paths.STORED_PROCEDURES_PATH_SEGMENT + "/" + resourceName;
            case UserDefinedFunction:
                return resourceOwnerFullName + "/" + Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT + "/" + resourceName;
            case Trigger:
                return resourceOwnerFullName + "/" + Paths.TRIGGERS_PATH_SEGMENT + "/" + resourceName;
            case Attachment:
                return resourceOwnerFullName + "/" + Paths.ATTACHMENTS_PATH_SEGMENT + "/" + resourceName;
            case Conflict:
                return resourceOwnerFullName + "/" + Paths.CONFLICTS_PATH_SEGMENT + "/" + resourceName;
            case Document:
                return resourceOwnerFullName + "/" + Paths.DOCUMENTS_PATH_SEGMENT + "/" + resourceName;
            case Offer:
                return resourceOwnerFullName + "/" + Paths.OFFERS_PATH_SEGMENT + "/" + resourceName;
            case Permission:
                return resourceOwnerFullName + "/" + Paths.PERMISSIONS_PATH_SEGMENT + "/" + resourceName;
            case User:
                return resourceOwnerFullName + "/" + Paths.USERS_PATH_SEGMENT + "/" + resourceName;
            case PartitionKeyRange:
                return resourceOwnerFullName + "/" + Paths.PARTITION_KEY_RANGES_PATH_SEGMENT + "/" + resourceName;
            default:
                return null;
        }
    }

    /**
     * Returns the database portion ("dbs/{db}") of a resource full name, or the input
     * unchanged when it has fewer than two '/'-separated levels.
     */
    public static String getDatabasePath(String resourceFullName) {
        if (resourceFullName != null) {
            int index = indexOfNth(resourceFullName, '/', 2);
            if (index > 0)
                return resourceFullName.substring(0, index);
        }

        return resourceFullName;
    }

    /**
     * Returns the ancestor path of {@code resourceFullName} truncated at the given
     * '/'-separated segment index; returns the full name itself when it is exactly
     * at that depth, or {@code null} when it is shallower.
     */
    public static String getParentByIndex(String resourceFullName, int segmentIndex) {
        int index = indexOfNth(resourceFullName, '/', segmentIndex);
        if (index > 0)
            return resourceFullName.substring(0, index);
        else {
            index = indexOfNth(resourceFullName, '/', segmentIndex - 1);
            if (index > 0)
                return resourceFullName;
            else
                return null;
        }
    }

    /**
     * Heuristic for name-based addressing: a RID is never longer than 4 characters
     * before its first separator, so a '/' at index 3 implies a name-based link
     * (e.g. "dbs/...").
     */
    public static boolean isNameBased(String resourceIdOrFullName) {
        if (resourceIdOrFullName != null && !resourceIdOrFullName.isEmpty()
            && resourceIdOrFullName.length() > 4 && resourceIdOrFullName.charAt(3) == '/') {
            return true;
        }
        return false;
    }

    /** Returns the index of the nth occurrence of {@code value} in {@code str}, or -1. */
    private static int indexOfNth(String str, char value, int nthOccurance) {
        int remaining = nthOccurance;
        char[] characters = str.toCharArray();
        for (int i = 0; i < characters.length; i++) {
            if (characters[i] == value) {
                remaining--;
                if (remaining == 0) {
                    return i;
                }
            }
        }
        return -1;
    }

    /**
     * Maps a path segment keyword to its {@link ResourceType}.
     *
     * @throws BadRequestException when the segment is empty or unknown
     */
    public static ResourceType getResourcePathSegment(String resourcePathSegment) throws BadRequestException {
        if (StringUtils.isEmpty(resourcePathSegment)) {
            String message = String.format(RMResources.StringArgumentNullOrEmpty, "resourcePathSegment");
            throw new BadRequestException(message);
        }

        switch (resourcePathSegment) {
            case Paths.ATTACHMENTS_PATH_SEGMENT:
                return ResourceType.Attachment;
            case Paths.COLLECTIONS_PATH_SEGMENT:
                return ResourceType.DocumentCollection;
            case Paths.DATABASES_PATH_SEGMENT:
                return ResourceType.Database;
            case Paths.PERMISSIONS_PATH_SEGMENT:
                return ResourceType.Permission;
            case Paths.USERS_PATH_SEGMENT:
                return ResourceType.User;
            case Paths.DOCUMENTS_PATH_SEGMENT:
                return ResourceType.Document;
            case Paths.STORED_PROCEDURES_PATH_SEGMENT:
                return ResourceType.StoredProcedure;
            case Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT:
                return ResourceType.UserDefinedFunction;
            case Paths.TRIGGERS_PATH_SEGMENT:
                return ResourceType.Trigger;
            case Paths.CONFLICTS_PATH_SEGMENT:
                return ResourceType.Conflict;
            case Paths.OFFERS_PATH_SEGMENT:
                return ResourceType.Offer;
            case Paths.SCHEMAS_PATH_SEGMENT:
                return ResourceType.Schema;
        }

        String errorMessage = String.format(RMResources.UnknownResourceType, resourcePathSegment);
        throw new BadRequestException(errorMessage);
    }

    /**
     * Maps a {@link ResourceType} to its path segment keyword.
     *
     * @throws BadRequestException for an unknown resource type
     */
    public static String getResourcePath(ResourceType resourceType) throws BadRequestException {
        switch (resourceType) {
            case Database:
                return Paths.DATABASES_PATH_SEGMENT;
            case DocumentCollection:
                return Paths.COLLECTIONS_PATH_SEGMENT;
            case Document:
                return Paths.DOCUMENTS_PATH_SEGMENT;
            case StoredProcedure:
                return Paths.STORED_PROCEDURES_PATH_SEGMENT;
            case UserDefinedFunction:
                return Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
            case Trigger:
                return Paths.TRIGGERS_PATH_SEGMENT;
            case Conflict:
                return Paths.CONFLICTS_PATH_SEGMENT;
            case Attachment:
                return Paths.ATTACHMENTS_PATH_SEGMENT;
            case User:
                return Paths.USERS_PATH_SEGMENT;
            case Permission:
                return Paths.PERMISSIONS_PATH_SEGMENT;
            case Offer:
                return Paths.OFFERS_PATH_SEGMENT;
            case MasterPartition:
            case ServerPartition:
                return Paths.PARTITIONS_PATH_SEGMENT;
            case PartitionKeyRange:
                return Paths.PARTITION_KEY_RANGES_PATH_SEGMENT;
            case Media:
                return Paths.MEDIA_ROOT;
            case Schema:
                return Paths.SCHEMAS_PATH_SEGMENT;
            case DatabaseAccount:
            case Topology:
                return Paths.ROOT;
            default:
                String errorMessage = String.format(RMResources.UnknownResourceType, resourceType.toString());
                throw new BadRequestException(errorMessage);
        }
    }

    /**
     * Validates that a resource full name has exactly the segment structure expected
     * for the given resource type (keyword segments at even indices).
     */
    public static boolean validateResourceFullName(ResourceType resourceType, String resourceFullName) {
        String[] segments = StringUtils.split(resourceFullName, '/');
        String[] resourcePathArray = getResourcePathArray(resourceType);
        if (resourcePathArray == null) {
            return false;
        }
        if (segments.length != resourcePathArray.length * 2) {
            return false;
        }
        for (int i = 0; i < resourcePathArray.length; i++) {
            if (resourcePathArray[i].compareTo(segments[2 * i]) != 0) {
                return false;
            }
        }
        return true;
    }

    /**
     * Returns the ordered keyword segments that a full name of the given type must
     * contain, or {@code null} for types without a name-based path.
     */
    private static String[] getResourcePathArray(ResourceType resourceType) {
        List<String> segments = new ArrayList<String>();
        segments.add(Paths.DATABASES_PATH_SEGMENT);
        if (resourceType == ResourceType.Permission || resourceType == ResourceType.User) {
            segments.add(Paths.USERS_PATH_SEGMENT);
            if (resourceType == ResourceType.Permission) {
                segments.add(Paths.PERMISSIONS_PATH_SEGMENT);
            }
        } else if (resourceType == ResourceType.ClientEncryptionKey) {
            segments.add(Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
        } else if (resourceType == ResourceType.DocumentCollection
            || resourceType == ResourceType.StoredProcedure
            || resourceType == ResourceType.UserDefinedFunction
            || resourceType == ResourceType.Trigger
            || resourceType == ResourceType.Conflict
            || resourceType == ResourceType.Attachment
            || resourceType == ResourceType.Document
            || resourceType == ResourceType.PartitionKeyRange
            || resourceType == ResourceType.Schema) {
            segments.add(Paths.COLLECTIONS_PATH_SEGMENT);
            if (resourceType == ResourceType.StoredProcedure) {
                segments.add(Paths.STORED_PROCEDURES_PATH_SEGMENT);
            } else if (resourceType == ResourceType.UserDefinedFunction) {
                segments.add(Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
            } else if (resourceType == ResourceType.Trigger) {
                segments.add(Paths.TRIGGERS_PATH_SEGMENT);
            } else if (resourceType == ResourceType.Conflict) {
                segments.add(Paths.CONFLICTS_PATH_SEGMENT);
            } else if (resourceType == ResourceType.Schema) {
                segments.add(Paths.SCHEMAS_PATH_SEGMENT);
            } else if (resourceType == ResourceType.Document || resourceType == ResourceType.Attachment) {
                segments.add(Paths.DOCUMENTS_PATH_SEGMENT);
                if (resourceType == ResourceType.Attachment) {
                    segments.add(Paths.ATTACHMENTS_PATH_SEGMENT);
                }
            } else if (resourceType == ResourceType.PartitionKeyRange) {
                segments.add(Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
            } else if (resourceType == ResourceType.PartitionKey) {
                // NOTE(review): this branch is unreachable — PartitionKey is not in the outer
                // condition, so PartitionKey falls through to the final "return null" below.
                // Preserved as-is to avoid a behavior change; confirm intent with the owners.
                segments.add(Paths.COLLECTIONS_PATH_SEGMENT);
                segments.add(Paths.OPERATIONS_PATH_SEGMENT);
            }
        } else if (resourceType != ResourceType.Database) {
            return null;
        }

        return segments.stream().toArray(String[]::new);
    }

    /** Validates that {@code resourceId} is a well-formed RID for the given resource type. */
    public static boolean validateResourceId(ResourceType resourceType, String resourceId) {
        if (resourceType == ResourceType.Conflict) {
            return PathsHelper.validateConflictId(resourceId);
        } else if (resourceType == ResourceType.Database) {
            return PathsHelper.validateDatabaseId(resourceId);
        } else if (resourceType == ResourceType.DocumentCollection) {
            return PathsHelper.validateDocumentCollectionId(resourceId);
        } else if (resourceType == ResourceType.Document) {
            return PathsHelper.validateDocumentId(resourceId);
        } else if (resourceType == ResourceType.Permission) {
            return PathsHelper.validatePermissionId(resourceId);
        } else if (resourceType == ResourceType.StoredProcedure) {
            return PathsHelper.validateStoredProcedureId(resourceId);
        } else if (resourceType == ResourceType.Trigger) {
            return PathsHelper.validateTriggerId(resourceId);
        } else if (resourceType == ResourceType.UserDefinedFunction) {
            return PathsHelper.validateUserDefinedFunctionId(resourceId);
        } else if (resourceType == ResourceType.User) {
            return PathsHelper.validateUserId(resourceId);
        } else if (resourceType == ResourceType.Attachment) {
            return PathsHelper.validateAttachmentId(resourceId);
        } else if (resourceType == ResourceType.ClientEncryptionKey) {
            return PathsHelper.validateClientEncryptionKeyId(resourceId);
        } else {
            logger.error(String.format("ValidateResourceId not implemented for Type %s in ResourceRequestHandler", resourceType.toString()));
            return false;
        }
    }

    /** Returns {@code true} when the string parses as a RID with a database component. */
    public static boolean validateDatabaseId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getDatabase() != 0;
    }

    /** Returns {@code true} when the string parses as a RID with a collection component. */
    public static boolean validateDocumentCollectionId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getDocumentCollection() != 0;
    }

    /** Returns {@code true} when the string parses as a RID with a document component. */
    public static boolean validateDocumentId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getDocument() != 0;
    }

    /** Returns {@code true} when the string parses as a RID with a conflict component. */
    public static boolean validateConflictId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getConflict() != 0;
    }

    /** Returns {@code true} when the string parses as a RID with an attachment component. */
    public static boolean validateAttachmentId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getAttachment() != 0;
    }

    /** Returns {@code true} when the string parses as a RID with a permission component. */
    public static boolean validatePermissionId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getPermission() != 0;
    }

    /** Returns {@code true} when the string parses as a RID with a stored-procedure component. */
    public static boolean validateStoredProcedureId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getStoredProcedure() != 0;
    }

    /** Returns {@code true} when the string parses as a RID with a trigger component. */
    public static boolean validateTriggerId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getTrigger() != 0;
    }

    /** Returns {@code true} when the string parses as a RID with a UDF component. */
    public static boolean validateUserDefinedFunctionId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getUserDefinedFunction() != 0;
    }

    /** Returns {@code true} when the string parses as a RID with a user component. */
    public static boolean validateUserId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getUser() != 0;
    }

    /** Returns {@code true} when the string parses as a RID with a client-encryption-key component. */
    public static boolean validateClientEncryptionKeyId(String resourceIdString) {
        Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
        return pair.getLeft() && pair.getRight().getClientEncryptionKey() != 0;
    }

    /** Returns {@code true} when the instance is one of the publicly-addressable resource types. */
    public static boolean isPublicResource(Resource resourceType) {
        if (resourceType instanceof Database
            || resourceType instanceof DocumentCollection
            || resourceType instanceof StoredProcedure
            || resourceType instanceof UserDefinedFunction
            || resourceType instanceof Trigger
            || resourceType instanceof Conflict
            || resourceType instanceof User
            || resourceType instanceof Permission
            || resourceType instanceof Document
            || resourceType instanceof Offer) {
            return true;
        } else {
            return false;
        }
    }
}
Looks like we missed one space here. It would be nice to have `spotlessApply` handle this automatically — I know that is on the roadmap. Do you know when it will be enabled for `sdk/monitor/*`?
private static boolean isClient(String metricName) { return metricName.contains(".client."); }
return metricName.contains(".client.");
private static boolean isClient(String metricName) { return metricName.contains(".client."); }
class MetricDataMapper { private static final ClientLogger logger = new ClientLogger(MetricDataMapper.class); private static final Set<String> OTEL_UNSTABLE_METRICS_TO_EXCLUDE = new HashSet<>(); private static final String OTEL_INSTRUMENTATION_NAME_PREFIX = "io.opentelemetry"; private static final Set<String> OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES = new HashSet<>(4); public static final AttributeKey<String> APPLICATIONINSIGHTS_INTERNAL_METRIC_NAME = AttributeKey.stringKey("applicationinsights.internal.metric_name"); private final BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer; private final boolean captureHttpServer4xxAsError; static { OTEL_UNSTABLE_METRICS_TO_EXCLUDE.add("rpc.client.duration"); OTEL_UNSTABLE_METRICS_TO_EXCLUDE.add("rpc.server.duration"); OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("http.server.request.duration"); OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("http.client.request.duration"); OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("rpc.client.duration"); OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("rpc.server.duration"); } public MetricDataMapper( BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer, boolean captureHttpServer4xxAsError) { this.telemetryInitializer = telemetryInitializer; this.captureHttpServer4xxAsError = captureHttpServer4xxAsError; } public void map(MetricData metricData, Consumer<TelemetryItem> consumer) { MetricDataType type = metricData.getType(); if (type == DOUBLE_SUM || type == DOUBLE_GAUGE || type == LONG_SUM || type == LONG_GAUGE || type == HISTOGRAM) { boolean isPreAggregatedStandardMetric = OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.contains(metricData.getName()); if (isPreAggregatedStandardMetric) { List<TelemetryItem> preAggregatedStandardMetrics = convertOtelMetricToAzureMonitorMetric(metricData, true); preAggregatedStandardMetrics.forEach(consumer::accept); } if (OTEL_UNSTABLE_METRICS_TO_EXCLUDE.contains(metricData.getName()) && 
metricData.getInstrumentationScopeInfo().getName().startsWith(OTEL_INSTRUMENTATION_NAME_PREFIX)) { return; } List<TelemetryItem> stableOtelMetrics = convertOtelMetricToAzureMonitorMetric(metricData, false); stableOtelMetrics.forEach(consumer::accept); } else { logger.warning("metric data type {} is not supported yet.", metricData.getType()); } } private List<TelemetryItem> convertOtelMetricToAzureMonitorMetric( MetricData metricData, boolean isPreAggregatedStandardMetric) { List<TelemetryItem> telemetryItems = new ArrayList<>(); for (PointData pointData : metricData.getData().getPoints()) { MetricTelemetryBuilder builder = MetricTelemetryBuilder.create(); telemetryInitializer.accept(builder, metricData.getResource()); builder.setTime(FormattedTime.offSetDateTimeFromEpochNanos(pointData.getEpochNanos())); updateMetricPointBuilder( builder, metricData, pointData, captureHttpServer4xxAsError, isPreAggregatedStandardMetric); telemetryItems.add(builder.build()); } return telemetryItems; } public static void updateMetricPointBuilder( MetricTelemetryBuilder metricTelemetryBuilder, MetricData metricData, PointData pointData, boolean captureHttpServer4xxAsError, boolean isPreAggregatedStandardMetric) { checkArgument(metricData != null, "MetricData cannot be null."); MetricPointBuilder pointBuilder = new MetricPointBuilder(); MetricDataType type = metricData.getType(); double pointDataValue; switch (type) { case LONG_SUM: case LONG_GAUGE: pointDataValue = (double) ((LongPointData) pointData).getValue(); break; case DOUBLE_SUM: case DOUBLE_GAUGE: pointDataValue = ((DoublePointData) pointData).getValue(); break; case HISTOGRAM: long histogramCount = ((HistogramPointData) pointData).getCount(); if (histogramCount <= Integer.MAX_VALUE && histogramCount >= Integer.MIN_VALUE) { pointBuilder.setCount((int) histogramCount); } HistogramPointData histogramPointData = (HistogramPointData) pointData; double min = histogramPointData.getMin(); double max = histogramPointData.getMax(); if 
(shouldConvertToMilliseconds(metricData.getName(), isPreAggregatedStandardMetric)) { min = min * 1000; max = max * 1000; } pointDataValue = histogramPointData.getSum(); pointBuilder.setMin(min); pointBuilder.setMax(max); break; case SUMMARY: case EXPONENTIAL_HISTOGRAM: default: throw new IllegalArgumentException("metric data type '" + type + "' is not supported yet"); } if (shouldConvertToMilliseconds(metricData.getName(), isPreAggregatedStandardMetric)) { pointDataValue = pointDataValue * 1000; } pointBuilder.setValue(pointDataValue); String metricName = pointData.getAttributes().get(APPLICATIONINSIGHTS_INTERNAL_METRIC_NAME); if (metricName != null) { pointBuilder.setName(metricName); } else { pointBuilder.setName(metricData.getName()); } metricTelemetryBuilder.setMetricPoint(pointBuilder); Attributes attributes = pointData.getAttributes(); if (isPreAggregatedStandardMetric) { Long statusCode = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); boolean success = isSuccess(metricData.getName(), statusCode, captureHttpServer4xxAsError); Boolean isSynthetic = attributes.get(IS_SYNTHETIC); attributes.forEach( (key, value) -> applyConnectionStringAndRoleNameOverrides( metricTelemetryBuilder, value, key.getKey())); if (isServer(metricData.getName())) { RequestExtractor.extract(metricTelemetryBuilder, statusCode, success, isSynthetic); } else if (isClient(metricData.getName())) { String dependencyType; int defaultPort; if (metricData.getName().startsWith("http")) { dependencyType = "Http"; defaultPort = getDefaultPortForHttpScheme(getStableOrOldAttribute(attributes, SemanticAttributes.URL_SCHEME, SemanticAttributes.HTTP_SCHEME)); } else { dependencyType = attributes.get(SemanticAttributes.RPC_SYSTEM); if (dependencyType == null) { dependencyType = "Unknown"; } defaultPort = Integer.MAX_VALUE; } String target = SpanDataMapper.getTargetOrDefault(attributes, defaultPort, dependencyType); 
DependencyExtractor.extract( metricTelemetryBuilder, statusCode, success, dependencyType, target, isSynthetic); } } else { MappingsBuilder mappingsBuilder = new MappingsBuilder(METRIC); mappingsBuilder.build().map(attributes, metricTelemetryBuilder); } } private static boolean shouldConvertToMilliseconds(String metricName, boolean isPreAggregatedStandardMetric) { return isPreAggregatedStandardMetric && (metricName.equals("http.server.request.duration") || metricName.equals("http.client.request.duration")); } private static boolean applyConnectionStringAndRoleNameOverrides( AbstractTelemetryBuilder telemetryBuilder, Object value, String key) { if (key.equals(AiSemanticAttributes.INTERNAL_CONNECTION_STRING.getKey()) && value instanceof String) { telemetryBuilder.setConnectionString(ConnectionString.parse((String) value)); return true; } if (key.equals(AiSemanticAttributes.INTERNAL_ROLE_NAME.getKey()) && value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_CLOUD_ROLE.toString(), (String) value); return true; } return false; } private static int getDefaultPortForHttpScheme(@Nullable String httpScheme) { if (httpScheme == null) { return Integer.MAX_VALUE; } if (httpScheme.equals("https")) { return 443; } if (httpScheme.equals("http")) { return 80; } return Integer.MAX_VALUE; } private static boolean isSuccess(String metricName, Long statusCode, boolean captureHttpServer4xxAsError) { if (statusCode == null) { return true; } if (isClient(metricName)) { return statusCode < 400; } if (isServer(metricName)) { if (captureHttpServer4xxAsError) { return statusCode < 400; } return statusCode < 500; } return false; } private static boolean isServer(String metricName) { return metricName.contains(".server."); } }
class MetricDataMapper { private static final ClientLogger logger = new ClientLogger(MetricDataMapper.class); private static final Set<String> OTEL_UNSTABLE_METRICS_TO_EXCLUDE = new HashSet<>(); private static final String OTEL_INSTRUMENTATION_NAME_PREFIX = "io.opentelemetry"; private static final Set<String> OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES = new HashSet<>(4); public static final AttributeKey<String> APPLICATIONINSIGHTS_INTERNAL_METRIC_NAME = AttributeKey.stringKey("applicationinsights.internal.metric_name"); private final BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer; private final boolean captureHttpServer4xxAsError; static { OTEL_UNSTABLE_METRICS_TO_EXCLUDE.add("rpc.client.duration"); OTEL_UNSTABLE_METRICS_TO_EXCLUDE.add("rpc.server.duration"); OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("http.server.request.duration"); OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("http.client.request.duration"); OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("rpc.client.duration"); OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("rpc.server.duration"); } public MetricDataMapper( BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer, boolean captureHttpServer4xxAsError) { this.telemetryInitializer = telemetryInitializer; this.captureHttpServer4xxAsError = captureHttpServer4xxAsError; } public void map(MetricData metricData, Consumer<TelemetryItem> consumer) { MetricDataType type = metricData.getType(); if (type == DOUBLE_SUM || type == DOUBLE_GAUGE || type == LONG_SUM || type == LONG_GAUGE || type == HISTOGRAM) { boolean isPreAggregatedStandardMetric = OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.contains(metricData.getName()); if (isPreAggregatedStandardMetric) { List<TelemetryItem> preAggregatedStandardMetrics = convertOtelMetricToAzureMonitorMetric(metricData, true); preAggregatedStandardMetrics.forEach(consumer::accept); } if (OTEL_UNSTABLE_METRICS_TO_EXCLUDE.contains(metricData.getName()) && 
metricData.getInstrumentationScopeInfo().getName().startsWith(OTEL_INSTRUMENTATION_NAME_PREFIX)) { return; } List<TelemetryItem> stableOtelMetrics = convertOtelMetricToAzureMonitorMetric(metricData, false); stableOtelMetrics.forEach(consumer::accept); } else { logger.warning("metric data type {} is not supported yet.", metricData.getType()); } } private List<TelemetryItem> convertOtelMetricToAzureMonitorMetric( MetricData metricData, boolean isPreAggregatedStandardMetric) { List<TelemetryItem> telemetryItems = new ArrayList<>(); for (PointData pointData : metricData.getData().getPoints()) { MetricTelemetryBuilder builder = MetricTelemetryBuilder.create(); telemetryInitializer.accept(builder, metricData.getResource()); builder.setTime(FormattedTime.offSetDateTimeFromEpochNanos(pointData.getEpochNanos())); updateMetricPointBuilder( builder, metricData, pointData, captureHttpServer4xxAsError, isPreAggregatedStandardMetric); telemetryItems.add(builder.build()); } return telemetryItems; } public static void updateMetricPointBuilder( MetricTelemetryBuilder metricTelemetryBuilder, MetricData metricData, PointData pointData, boolean captureHttpServer4xxAsError, boolean isPreAggregatedStandardMetric) { checkArgument(metricData != null, "MetricData cannot be null."); MetricPointBuilder pointBuilder = new MetricPointBuilder(); MetricDataType type = metricData.getType(); double pointDataValue; switch (type) { case LONG_SUM: case LONG_GAUGE: pointDataValue = (double) ((LongPointData) pointData).getValue(); break; case DOUBLE_SUM: case DOUBLE_GAUGE: pointDataValue = ((DoublePointData) pointData).getValue(); break; case HISTOGRAM: long histogramCount = ((HistogramPointData) pointData).getCount(); if (histogramCount <= Integer.MAX_VALUE && histogramCount >= Integer.MIN_VALUE) { pointBuilder.setCount((int) histogramCount); } HistogramPointData histogramPointData = (HistogramPointData) pointData; double min = histogramPointData.getMin(); double max = histogramPointData.getMax(); if 
(shouldConvertToMilliseconds(metricData.getName(), isPreAggregatedStandardMetric)) { min = min * 1000; max = max * 1000; } pointDataValue = histogramPointData.getSum(); pointBuilder.setMin(min); pointBuilder.setMax(max); break; case SUMMARY: case EXPONENTIAL_HISTOGRAM: default: throw new IllegalArgumentException("metric data type '" + type + "' is not supported yet"); } if (shouldConvertToMilliseconds(metricData.getName(), isPreAggregatedStandardMetric)) { pointDataValue = pointDataValue * 1000; } pointBuilder.setValue(pointDataValue); String metricName = pointData.getAttributes().get(APPLICATIONINSIGHTS_INTERNAL_METRIC_NAME); if (metricName != null) { pointBuilder.setName(metricName); } else { pointBuilder.setName(metricData.getName()); } metricTelemetryBuilder.setMetricPoint(pointBuilder); Attributes attributes = pointData.getAttributes(); if (isPreAggregatedStandardMetric) { Long statusCode = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); boolean success = isSuccess(metricData.getName(), statusCode, captureHttpServer4xxAsError); Boolean isSynthetic = attributes.get(IS_SYNTHETIC); attributes.forEach( (key, value) -> applyConnectionStringAndRoleNameOverrides( metricTelemetryBuilder, value, key.getKey())); if (isServer(metricData.getName())) { RequestExtractor.extract(metricTelemetryBuilder, statusCode, success, isSynthetic); } else if (isClient(metricData.getName())) { String dependencyType; int defaultPort; if (metricData.getName().startsWith("http")) { dependencyType = "Http"; defaultPort = getDefaultPortForHttpScheme(getStableOrOldAttribute(attributes, SemanticAttributes.URL_SCHEME, SemanticAttributes.HTTP_SCHEME)); } else { dependencyType = attributes.get(SemanticAttributes.RPC_SYSTEM); if (dependencyType == null) { dependencyType = "Unknown"; } defaultPort = Integer.MAX_VALUE; } String target = SpanDataMapper.getTargetOrDefault(attributes, defaultPort, dependencyType); 
DependencyExtractor.extract( metricTelemetryBuilder, statusCode, success, dependencyType, target, isSynthetic); } } else { MappingsBuilder mappingsBuilder = new MappingsBuilder(METRIC); mappingsBuilder.build().map(attributes, metricTelemetryBuilder); } } private static boolean shouldConvertToMilliseconds(String metricName, boolean isPreAggregatedStandardMetric) { return isPreAggregatedStandardMetric && (metricName.equals("http.server.request.duration") || metricName.equals("http.client.request.duration")); } private static boolean applyConnectionStringAndRoleNameOverrides( AbstractTelemetryBuilder telemetryBuilder, Object value, String key) { if (key.equals(AiSemanticAttributes.INTERNAL_CONNECTION_STRING.getKey()) && value instanceof String) { telemetryBuilder.setConnectionString(ConnectionString.parse((String) value)); return true; } if (key.equals(AiSemanticAttributes.INTERNAL_ROLE_NAME.getKey()) && value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_CLOUD_ROLE.toString(), (String) value); return true; } return false; } private static int getDefaultPortForHttpScheme(@Nullable String httpScheme) { if (httpScheme == null) { return Integer.MAX_VALUE; } if (httpScheme.equals("https")) { return 443; } if (httpScheme.equals("http")) { return 80; } return Integer.MAX_VALUE; } private static boolean isSuccess(String metricName, Long statusCode, boolean captureHttpServer4xxAsError) { if (statusCode == null) { return true; } if (isClient(metricName)) { return statusCode < 400; } if (isServer(metricName)) { if (captureHttpServer4xxAsError) { return statusCode < 400; } return statusCode < 500; } return false; } private static boolean isServer(String metricName) { return metricName.contains(".server."); } }
That can be added any time when we feel comfortable with Spotless formatting the code in Monitor OpenTelemetry Exporter. I can file another PR after this for that if you want.
private static boolean isClient(String metricName) { return metricName.contains(".client."); }
return metricName.contains(".client.");
private static boolean isClient(String metricName) { return metricName.contains(".client."); }
class MetricDataMapper { private static final ClientLogger logger = new ClientLogger(MetricDataMapper.class); private static final Set<String> OTEL_UNSTABLE_METRICS_TO_EXCLUDE = new HashSet<>(); private static final String OTEL_INSTRUMENTATION_NAME_PREFIX = "io.opentelemetry"; private static final Set<String> OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES = new HashSet<>(4); public static final AttributeKey<String> APPLICATIONINSIGHTS_INTERNAL_METRIC_NAME = AttributeKey.stringKey("applicationinsights.internal.metric_name"); private final BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer; private final boolean captureHttpServer4xxAsError; static { OTEL_UNSTABLE_METRICS_TO_EXCLUDE.add("rpc.client.duration"); OTEL_UNSTABLE_METRICS_TO_EXCLUDE.add("rpc.server.duration"); OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("http.server.request.duration"); OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("http.client.request.duration"); OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("rpc.client.duration"); OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("rpc.server.duration"); } public MetricDataMapper( BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer, boolean captureHttpServer4xxAsError) { this.telemetryInitializer = telemetryInitializer; this.captureHttpServer4xxAsError = captureHttpServer4xxAsError; } public void map(MetricData metricData, Consumer<TelemetryItem> consumer) { MetricDataType type = metricData.getType(); if (type == DOUBLE_SUM || type == DOUBLE_GAUGE || type == LONG_SUM || type == LONG_GAUGE || type == HISTOGRAM) { boolean isPreAggregatedStandardMetric = OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.contains(metricData.getName()); if (isPreAggregatedStandardMetric) { List<TelemetryItem> preAggregatedStandardMetrics = convertOtelMetricToAzureMonitorMetric(metricData, true); preAggregatedStandardMetrics.forEach(consumer::accept); } if (OTEL_UNSTABLE_METRICS_TO_EXCLUDE.contains(metricData.getName()) && 
metricData.getInstrumentationScopeInfo().getName().startsWith(OTEL_INSTRUMENTATION_NAME_PREFIX)) { return; } List<TelemetryItem> stableOtelMetrics = convertOtelMetricToAzureMonitorMetric(metricData, false); stableOtelMetrics.forEach(consumer::accept); } else { logger.warning("metric data type {} is not supported yet.", metricData.getType()); } } private List<TelemetryItem> convertOtelMetricToAzureMonitorMetric( MetricData metricData, boolean isPreAggregatedStandardMetric) { List<TelemetryItem> telemetryItems = new ArrayList<>(); for (PointData pointData : metricData.getData().getPoints()) { MetricTelemetryBuilder builder = MetricTelemetryBuilder.create(); telemetryInitializer.accept(builder, metricData.getResource()); builder.setTime(FormattedTime.offSetDateTimeFromEpochNanos(pointData.getEpochNanos())); updateMetricPointBuilder( builder, metricData, pointData, captureHttpServer4xxAsError, isPreAggregatedStandardMetric); telemetryItems.add(builder.build()); } return telemetryItems; } public static void updateMetricPointBuilder( MetricTelemetryBuilder metricTelemetryBuilder, MetricData metricData, PointData pointData, boolean captureHttpServer4xxAsError, boolean isPreAggregatedStandardMetric) { checkArgument(metricData != null, "MetricData cannot be null."); MetricPointBuilder pointBuilder = new MetricPointBuilder(); MetricDataType type = metricData.getType(); double pointDataValue; switch (type) { case LONG_SUM: case LONG_GAUGE: pointDataValue = (double) ((LongPointData) pointData).getValue(); break; case DOUBLE_SUM: case DOUBLE_GAUGE: pointDataValue = ((DoublePointData) pointData).getValue(); break; case HISTOGRAM: long histogramCount = ((HistogramPointData) pointData).getCount(); if (histogramCount <= Integer.MAX_VALUE && histogramCount >= Integer.MIN_VALUE) { pointBuilder.setCount((int) histogramCount); } HistogramPointData histogramPointData = (HistogramPointData) pointData; double min = histogramPointData.getMin(); double max = histogramPointData.getMax(); if 
(shouldConvertToMilliseconds(metricData.getName(), isPreAggregatedStandardMetric)) { min = min * 1000; max = max * 1000; } pointDataValue = histogramPointData.getSum(); pointBuilder.setMin(min); pointBuilder.setMax(max); break; case SUMMARY: case EXPONENTIAL_HISTOGRAM: default: throw new IllegalArgumentException("metric data type '" + type + "' is not supported yet"); } if (shouldConvertToMilliseconds(metricData.getName(), isPreAggregatedStandardMetric)) { pointDataValue = pointDataValue * 1000; } pointBuilder.setValue(pointDataValue); String metricName = pointData.getAttributes().get(APPLICATIONINSIGHTS_INTERNAL_METRIC_NAME); if (metricName != null) { pointBuilder.setName(metricName); } else { pointBuilder.setName(metricData.getName()); } metricTelemetryBuilder.setMetricPoint(pointBuilder); Attributes attributes = pointData.getAttributes(); if (isPreAggregatedStandardMetric) { Long statusCode = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); boolean success = isSuccess(metricData.getName(), statusCode, captureHttpServer4xxAsError); Boolean isSynthetic = attributes.get(IS_SYNTHETIC); attributes.forEach( (key, value) -> applyConnectionStringAndRoleNameOverrides( metricTelemetryBuilder, value, key.getKey())); if (isServer(metricData.getName())) { RequestExtractor.extract(metricTelemetryBuilder, statusCode, success, isSynthetic); } else if (isClient(metricData.getName())) { String dependencyType; int defaultPort; if (metricData.getName().startsWith("http")) { dependencyType = "Http"; defaultPort = getDefaultPortForHttpScheme(getStableOrOldAttribute(attributes, SemanticAttributes.URL_SCHEME, SemanticAttributes.HTTP_SCHEME)); } else { dependencyType = attributes.get(SemanticAttributes.RPC_SYSTEM); if (dependencyType == null) { dependencyType = "Unknown"; } defaultPort = Integer.MAX_VALUE; } String target = SpanDataMapper.getTargetOrDefault(attributes, defaultPort, dependencyType); 
DependencyExtractor.extract( metricTelemetryBuilder, statusCode, success, dependencyType, target, isSynthetic); } } else { MappingsBuilder mappingsBuilder = new MappingsBuilder(METRIC); mappingsBuilder.build().map(attributes, metricTelemetryBuilder); } } private static boolean shouldConvertToMilliseconds(String metricName, boolean isPreAggregatedStandardMetric) { return isPreAggregatedStandardMetric && (metricName.equals("http.server.request.duration") || metricName.equals("http.client.request.duration")); } private static boolean applyConnectionStringAndRoleNameOverrides( AbstractTelemetryBuilder telemetryBuilder, Object value, String key) { if (key.equals(AiSemanticAttributes.INTERNAL_CONNECTION_STRING.getKey()) && value instanceof String) { telemetryBuilder.setConnectionString(ConnectionString.parse((String) value)); return true; } if (key.equals(AiSemanticAttributes.INTERNAL_ROLE_NAME.getKey()) && value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_CLOUD_ROLE.toString(), (String) value); return true; } return false; } private static int getDefaultPortForHttpScheme(@Nullable String httpScheme) { if (httpScheme == null) { return Integer.MAX_VALUE; } if (httpScheme.equals("https")) { return 443; } if (httpScheme.equals("http")) { return 80; } return Integer.MAX_VALUE; } private static boolean isSuccess(String metricName, Long statusCode, boolean captureHttpServer4xxAsError) { if (statusCode == null) { return true; } if (isClient(metricName)) { return statusCode < 400; } if (isServer(metricName)) { if (captureHttpServer4xxAsError) { return statusCode < 400; } return statusCode < 500; } return false; } private static boolean isServer(String metricName) { return metricName.contains(".server."); } }
class MetricDataMapper { private static final ClientLogger logger = new ClientLogger(MetricDataMapper.class); private static final Set<String> OTEL_UNSTABLE_METRICS_TO_EXCLUDE = new HashSet<>(); private static final String OTEL_INSTRUMENTATION_NAME_PREFIX = "io.opentelemetry"; private static final Set<String> OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES = new HashSet<>(4); public static final AttributeKey<String> APPLICATIONINSIGHTS_INTERNAL_METRIC_NAME = AttributeKey.stringKey("applicationinsights.internal.metric_name"); private final BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer; private final boolean captureHttpServer4xxAsError; static { OTEL_UNSTABLE_METRICS_TO_EXCLUDE.add("rpc.client.duration"); OTEL_UNSTABLE_METRICS_TO_EXCLUDE.add("rpc.server.duration"); OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("http.server.request.duration"); OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("http.client.request.duration"); OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("rpc.client.duration"); OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.add("rpc.server.duration"); } public MetricDataMapper( BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer, boolean captureHttpServer4xxAsError) { this.telemetryInitializer = telemetryInitializer; this.captureHttpServer4xxAsError = captureHttpServer4xxAsError; } public void map(MetricData metricData, Consumer<TelemetryItem> consumer) { MetricDataType type = metricData.getType(); if (type == DOUBLE_SUM || type == DOUBLE_GAUGE || type == LONG_SUM || type == LONG_GAUGE || type == HISTOGRAM) { boolean isPreAggregatedStandardMetric = OTEL_PRE_AGGREGATED_STANDARD_METRIC_NAMES.contains(metricData.getName()); if (isPreAggregatedStandardMetric) { List<TelemetryItem> preAggregatedStandardMetrics = convertOtelMetricToAzureMonitorMetric(metricData, true); preAggregatedStandardMetrics.forEach(consumer::accept); } if (OTEL_UNSTABLE_METRICS_TO_EXCLUDE.contains(metricData.getName()) && 
metricData.getInstrumentationScopeInfo().getName().startsWith(OTEL_INSTRUMENTATION_NAME_PREFIX)) { return; } List<TelemetryItem> stableOtelMetrics = convertOtelMetricToAzureMonitorMetric(metricData, false); stableOtelMetrics.forEach(consumer::accept); } else { logger.warning("metric data type {} is not supported yet.", metricData.getType()); } } private List<TelemetryItem> convertOtelMetricToAzureMonitorMetric( MetricData metricData, boolean isPreAggregatedStandardMetric) { List<TelemetryItem> telemetryItems = new ArrayList<>(); for (PointData pointData : metricData.getData().getPoints()) { MetricTelemetryBuilder builder = MetricTelemetryBuilder.create(); telemetryInitializer.accept(builder, metricData.getResource()); builder.setTime(FormattedTime.offSetDateTimeFromEpochNanos(pointData.getEpochNanos())); updateMetricPointBuilder( builder, metricData, pointData, captureHttpServer4xxAsError, isPreAggregatedStandardMetric); telemetryItems.add(builder.build()); } return telemetryItems; } public static void updateMetricPointBuilder( MetricTelemetryBuilder metricTelemetryBuilder, MetricData metricData, PointData pointData, boolean captureHttpServer4xxAsError, boolean isPreAggregatedStandardMetric) { checkArgument(metricData != null, "MetricData cannot be null."); MetricPointBuilder pointBuilder = new MetricPointBuilder(); MetricDataType type = metricData.getType(); double pointDataValue; switch (type) { case LONG_SUM: case LONG_GAUGE: pointDataValue = (double) ((LongPointData) pointData).getValue(); break; case DOUBLE_SUM: case DOUBLE_GAUGE: pointDataValue = ((DoublePointData) pointData).getValue(); break; case HISTOGRAM: long histogramCount = ((HistogramPointData) pointData).getCount(); if (histogramCount <= Integer.MAX_VALUE && histogramCount >= Integer.MIN_VALUE) { pointBuilder.setCount((int) histogramCount); } HistogramPointData histogramPointData = (HistogramPointData) pointData; double min = histogramPointData.getMin(); double max = histogramPointData.getMax(); if 
(shouldConvertToMilliseconds(metricData.getName(), isPreAggregatedStandardMetric)) { min = min * 1000; max = max * 1000; } pointDataValue = histogramPointData.getSum(); pointBuilder.setMin(min); pointBuilder.setMax(max); break; case SUMMARY: case EXPONENTIAL_HISTOGRAM: default: throw new IllegalArgumentException("metric data type '" + type + "' is not supported yet"); } if (shouldConvertToMilliseconds(metricData.getName(), isPreAggregatedStandardMetric)) { pointDataValue = pointDataValue * 1000; } pointBuilder.setValue(pointDataValue); String metricName = pointData.getAttributes().get(APPLICATIONINSIGHTS_INTERNAL_METRIC_NAME); if (metricName != null) { pointBuilder.setName(metricName); } else { pointBuilder.setName(metricData.getName()); } metricTelemetryBuilder.setMetricPoint(pointBuilder); Attributes attributes = pointData.getAttributes(); if (isPreAggregatedStandardMetric) { Long statusCode = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); boolean success = isSuccess(metricData.getName(), statusCode, captureHttpServer4xxAsError); Boolean isSynthetic = attributes.get(IS_SYNTHETIC); attributes.forEach( (key, value) -> applyConnectionStringAndRoleNameOverrides( metricTelemetryBuilder, value, key.getKey())); if (isServer(metricData.getName())) { RequestExtractor.extract(metricTelemetryBuilder, statusCode, success, isSynthetic); } else if (isClient(metricData.getName())) { String dependencyType; int defaultPort; if (metricData.getName().startsWith("http")) { dependencyType = "Http"; defaultPort = getDefaultPortForHttpScheme(getStableOrOldAttribute(attributes, SemanticAttributes.URL_SCHEME, SemanticAttributes.HTTP_SCHEME)); } else { dependencyType = attributes.get(SemanticAttributes.RPC_SYSTEM); if (dependencyType == null) { dependencyType = "Unknown"; } defaultPort = Integer.MAX_VALUE; } String target = SpanDataMapper.getTargetOrDefault(attributes, defaultPort, dependencyType); 
DependencyExtractor.extract( metricTelemetryBuilder, statusCode, success, dependencyType, target, isSynthetic); } } else { MappingsBuilder mappingsBuilder = new MappingsBuilder(METRIC); mappingsBuilder.build().map(attributes, metricTelemetryBuilder); } } private static boolean shouldConvertToMilliseconds(String metricName, boolean isPreAggregatedStandardMetric) { return isPreAggregatedStandardMetric && (metricName.equals("http.server.request.duration") || metricName.equals("http.client.request.duration")); } private static boolean applyConnectionStringAndRoleNameOverrides( AbstractTelemetryBuilder telemetryBuilder, Object value, String key) { if (key.equals(AiSemanticAttributes.INTERNAL_CONNECTION_STRING.getKey()) && value instanceof String) { telemetryBuilder.setConnectionString(ConnectionString.parse((String) value)); return true; } if (key.equals(AiSemanticAttributes.INTERNAL_ROLE_NAME.getKey()) && value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_CLOUD_ROLE.toString(), (String) value); return true; } return false; } private static int getDefaultPortForHttpScheme(@Nullable String httpScheme) { if (httpScheme == null) { return Integer.MAX_VALUE; } if (httpScheme.equals("https")) { return 443; } if (httpScheme.equals("http")) { return 80; } return Integer.MAX_VALUE; } private static boolean isSuccess(String metricName, Long statusCode, boolean captureHttpServer4xxAsError) { if (statusCode == null) { return true; } if (isClient(metricName)) { return statusCode < 400; } if (isServer(metricName)) { if (captureHttpServer4xxAsError) { return statusCode < 400; } return statusCode < 500; } return false; } private static boolean isServer(String metricName) { return metricName.contains(".server."); } }
We shouldn't need to mock the `TimeWindowFilter`, they were written in a way that should always result in it being true. We aren't testing edge cases with the validation tests. The dates that need to be in the future are all in the year `3023` and all the ones in the past are the year `2023`. These tests without a mock will run fine for another 999 or so years.
public void setup() { MockitoAnnotations.openMocks(this); when(configProperties.isFailFast()).thenReturn(true); when(context.getBean(Mockito.contains("TimeWindow"))).thenReturn(new TimeWindowFilter()); }
when(context.getBean(Mockito.contains("TimeWindow"))).thenReturn(new TimeWindowFilter());
/**
 * Prepares each test: opens the Mockito mocks declared on this class, then stubs
 * the config to fail fast and the application context to hand back a concrete
 * TimeWindowFilter for any bean name containing "TimeWindow".
 */
public void setup() {
    // openMocks must precede any when(...) call on the @Mock fields.
    MockitoAnnotations.openMocks(this);
    when(configProperties.isFailFast()).thenReturn(true);
    // A concrete filter instance lets the time-window logic run for real.
    when(context.getBean(Mockito.contains("TimeWindow"))).thenReturn(new TimeWindowFilter());
}
/**
 * Data-driven validation tests for feature-flag evaluation. Each "sample" resource
 * file holds a feature-management configuration; the matching "tests" file holds
 * the expected evaluation results ({@link ValidationTestCase}) for it. Samples and
 * tests are paired by index after sorting both listings by file name.
 */
class ValidationsTest {

    @Mock
    private ApplicationContext context;

    @Mock
    private FeatureManagementConfigProperties configProperties;

    // Case-insensitive property binding so JSON keys match regardless of casing.
    private final ObjectMapper objectMapper = JsonMapper.builder()
        .configure(MapperFeature.ACCEPT_CASE_INSENSITIVE_PROPERTIES, true).build();

    private final String testCaseFolderPath = "validations-tests";
    private final String inputsUser = "user";
    private final String inputsGroups = "groups";
    private final String sampleFileNameFilter = "sample";
    private final String testsFileNameFilter = "tests";

    /**
     * Re-initializes (and closes) the Mockito mocks around every test so stubbings
     * registered while running one test-case file cannot leak into the next.
     */
    @BeforeEach
    @AfterEach
    public void cleanup() throws Exception {
        MockitoAnnotations.openMocks(this).close();
    }

    /** Returns true when the test case declares an expected exception message. */
    private boolean hasException(ValidationTestCase testCase) {
        final String exceptionStr = testCase.getIsEnabled().getException();
        return exceptionStr != null && !exceptionStr.isEmpty();
    }

    /** Returns true when the test case supplies evaluation inputs (user/groups). */
    private boolean hasInput(ValidationTestCase testCase) {
        final LinkedHashMap<String, Object> inputsMap = testCase.getInputs();
        return inputsMap != null && !inputsMap.isEmpty();
    }

    /**
     * Lists the resource files under {@code validations-tests} whose names contain
     * {@code fileNameFilter}, sorted by name so sample and tests files pair up
     * index-by-index.
     */
    private File[] getFileList(String fileNameFilter) {
        final URL folderUrl = Thread.currentThread().getContextClassLoader().getResource(testCaseFolderPath);
        assert folderUrl != null;
        final File folderFile = new File(folderUrl.getFile());
        final File[] filteredFiles = folderFile
            .listFiles(pathname -> pathname.getName().toLowerCase().contains(fileNameFilter));
        assert filteredFiles != null;
        // Deterministic ordering keeps sampleFiles[i] paired with testsFiles[i].
        Arrays.sort(filteredFiles, Comparator.comparing(File::getName));
        return filteredFiles;
    }

    /** Deserializes a "tests" JSON file into its list of validation test cases. */
    private List<ValidationTestCase> readTestcasesFromFile(File testFile) throws IOException {
        final String jsonString = Files.readString(testFile.toPath());
        final CollectionType typeReference
            = TypeFactory.defaultInstance().constructCollectionType(List.class, ValidationTestCase.class);
        return objectMapper.readValue(jsonString, typeReference);
    }

    /**
     * Reads a "sample" JSON file and returns its {@code feature_management}
     * section, or an empty map when the section is missing or not a JSON object.
     */
    @SuppressWarnings("unchecked")
    private LinkedHashMap<String, Object> readConfigurationFromFile(File sampleFile) throws IOException {
        final String jsonString = Files.readString(sampleFile.toPath());
        final LinkedHashMap<String, Object> configurations = objectMapper.readValue(jsonString,
            new TypeReference<>() { });
        final Object featureManagementSection = configurations.get("feature_management");
        // instanceof replaces getClass().isAssignableFrom(LinkedHashMap.class): the old
        // check was the wrong direction (a plain HashMap instance passed it, then the
        // cast below threw ClassCastException) and it NPE'd on a missing section.
        if (featureManagementSection instanceof LinkedHashMap) {
            return (LinkedHashMap<String, Object>) featureManagementSection;
        }
        return new LinkedHashMap<>();
    }

    /**
     * Runs every test case in {@code testsFile} against the configuration loaded
     * from {@code sampleFile}: a case expecting an exception asserts the malformed
     * flag was dropped during binding; otherwise the evaluated result must match
     * the expectation.
     */
    @SuppressWarnings("unchecked")
    private void runTestcases(File sampleFile, File testsFile) throws IOException {
        final FeatureManagementProperties managementProperties = new FeatureManagementProperties();
        managementProperties.putAll(readConfigurationFromFile(sampleFile));
        final FeatureManager featureManager = new FeatureManager(context, managementProperties, configProperties);
        final List<ValidationTestCase> testCases = readTestcasesFromFile(testsFile);
        System.out.println("Running test case from file: " + testsFile.getName());
        for (int i = 0; i < testCases.size(); i++) {
            // Hoist the repeated testCases.get(i) lookups into a single local.
            final ValidationTestCase testCase = testCases.get(i);
            System.out.println("Test case " + i + " : " + testCase.getDescription());
            if (hasException(testCase)) {
                // An invalid flag definition must have been rejected at bind time.
                assertNull(managementProperties.getOnOff().get(testCase.getFeatureFlagName()));
            } else {
                if (hasInput(testCase)) {
                    final Object userObj = testCase.getInputs().get(inputsUser);
                    final Object groupsObj = testCase.getInputs().get(inputsGroups);
                    final String user = userObj != null ? userObj.toString() : null;
                    final List<String> groups = groupsObj != null ? (List<String>) groupsObj : null;
                    when(context.getBean(Mockito.contains("Targeting")))
                        .thenReturn(new TargetingFilter(new TargetingFilterTestContextAccessor(user, groups)));
                }
                final Boolean result = featureManager.isEnabled(testCase.getFeatureFlagName());
                // JUnit convention: expected value first, actual value second.
                assertEquals(testCase.getIsEnabled().getResult(), result.toString());
            }
        }
    }

    /**
     * Entry point: pairs each sample file with its tests file by index and runs
     * them, skipping the targeting-filter sample which needs dedicated setup.
     */
    @Test
    void validationsTest() throws IOException {
        final File[] sampleFiles = getFileList(sampleFileNameFilter);
        final File[] testsFiles = getFileList(testsFileNameFilter);
        if (sampleFiles.length != testsFiles.length) {
            throw new IllegalArgumentException("The sample files and tests files should have same count.");
        }
        for (int i = 0; i < sampleFiles.length; i++) {
            if (sampleFiles[i].getName().contains("TargetingFilter.sample")) {
                continue;
            }
            runTestcases(sampleFiles[i], testsFiles[i]);
        }
    }
}
/**
 * Parameterized validation tests for feature-flag evaluation: each "tests"
 * resource file becomes one test invocation, paired by index with the
 * configuration loaded from the corresponding "sample" file.
 */
class ValidationsTest {

    @Mock
    private ApplicationContext context;

    @Mock
    private FeatureManagementConfigProperties configProperties;

    private static final Logger LOGGER = LoggerFactory.getLogger(ValidationsTest.class);

    // Case-insensitive property binding so JSON keys match regardless of casing.
    private static final ObjectMapper OBJECT_MAPPER = JsonMapper.builder()
        .configure(MapperFeature.ACCEPT_CASE_INSENSITIVE_PROPERTIES, true).build();

    private static final String TEST_CASE_FOLDER_PATH = "validations-tests";
    // Promoted to constants for consistency with the other static finals here.
    private static final String INPUTS_USER = "user";
    private static final String INPUTS_GROUPS = "groups";
    private static final String SAMPLE_FILE_NAME_FILTER = "sample";
    private static final String TESTS_FILE_NAME_FILTER = "tests";

    /**
     * Re-initializes (and closes) the Mockito mocks around every test so
     * stubbings from one parameterized invocation cannot leak into the next.
     */
    @BeforeEach
    @AfterEach
    public void cleanup() throws Exception {
        MockitoAnnotations.openMocks(this).close();
    }

    /** Returns true when the test case declares an expected exception message. */
    private boolean hasException(ValidationTestCase testCase) {
        final String exceptionStr = testCase.getIsEnabled().getException();
        return exceptionStr != null && !exceptionStr.isEmpty();
    }

    /** Returns true when the test case supplies evaluation inputs (user/groups). */
    private boolean hasInput(ValidationTestCase testCase) {
        final LinkedHashMap<String, Object> inputsMap = testCase.getInputs();
        return inputsMap != null && !inputsMap.isEmpty();
    }

    /**
     * Lists the resource files under {@code validations-tests} whose names contain
     * {@code fileNameFilter}, sorted by name so samples and tests pair by index.
     */
    private static File[] getFileList(String fileNameFilter) {
        final URL folderUrl = Thread.currentThread().getContextClassLoader().getResource(TEST_CASE_FOLDER_PATH);
        assert folderUrl != null;
        final File folderFile = new File(folderUrl.getFile());
        final File[] filteredFiles = folderFile
            .listFiles(pathname -> pathname.getName().toLowerCase().contains(fileNameFilter));
        assert filteredFiles != null;
        Arrays.sort(filteredFiles, Comparator.comparing(File::getName));
        return filteredFiles;
    }

    /** Deserializes a "tests" JSON file into its list of validation test cases. */
    private List<ValidationTestCase> readTestcasesFromFile(File testFile) throws IOException {
        final String jsonString = Files.readString(testFile.toPath());
        final CollectionType typeReference
            = TypeFactory.defaultInstance().constructCollectionType(List.class, ValidationTestCase.class);
        return OBJECT_MAPPER.readValue(jsonString, typeReference);
    }

    /**
     * Reads a "sample" JSON file and returns its {@code feature_management}
     * section.
     *
     * @throws IllegalArgumentException when the section is missing or not a map.
     */
    @SuppressWarnings("unchecked")
    private static LinkedHashMap<String, Object> readConfigurationFromFile(File sampleFile) throws IOException {
        final String jsonString = Files.readString(sampleFile.toPath());
        final LinkedHashMap<String, Object> configurations = OBJECT_MAPPER.readValue(jsonString,
            new TypeReference<>() { });
        final Object featureManagementSection = configurations.get("feature_management");
        // instanceof replaces getClass().isAssignableFrom(LinkedHashMap.class): the old
        // check was the wrong direction (a plain HashMap passed it and the cast threw
        // ClassCastException) and it NPE'd when the section was absent.
        if (featureManagementSection instanceof LinkedHashMap) {
            return (LinkedHashMap<String, Object>) featureManagementSection;
        }
        throw new IllegalArgumentException("feature_management part is not a map");
    }

    /**
     * Supplies one (display name, tests file, bound configuration) triple per
     * non-targeting tests file; both listings are name-sorted so index {@code i}
     * in each refers to the same scenario.
     */
    static Stream<Arguments> testProvider() throws IOException {
        List<Arguments> arguments = new ArrayList<>();
        File[] files = getFileList(TESTS_FILE_NAME_FILTER);
        final File[] sampleFiles = getFileList(SAMPLE_FILE_NAME_FILTER);
        List<FeatureManagementProperties> properties = new ArrayList<>();
        for (File sampleFile : sampleFiles) {
            final FeatureManagementProperties managementProperties = new FeatureManagementProperties();
            managementProperties.putAll(readConfigurationFromFile(sampleFile));
            properties.add(managementProperties);
        }
        for (int i = 0; i < files.length; i++) {
            // The targeting-filter scenario needs dedicated setup; skip it here.
            if (files[i].getName().contains("TargetingFilter")) {
                continue;
            }
            arguments.add(Arguments.of(files[i].getName(), files[i], properties.get(i)));
        }
        return arguments.stream();
    }

    /**
     * Runs every test case in {@code testsFile} against its bound configuration:
     * a case expecting an exception asserts the malformed flag was dropped at
     * bind time; otherwise the evaluated result must match the expectation.
     */
    @ParameterizedTest(name = "{0}")
    @MethodSource("testProvider")
    void validationTest(String name, File testsFile, FeatureManagementProperties managementProperties)
        throws IOException {
        // Parameterized logging avoids eager string concatenation when debug is off.
        LOGGER.debug("Running test case from file: {}", name);
        final FeatureManager featureManager = new FeatureManager(context, managementProperties, configProperties);
        List<ValidationTestCase> testCases = readTestcasesFromFile(testsFile);
        for (ValidationTestCase testCase : testCases) {
            LOGGER.debug("Test case : {}", testCase.getDescription());
            if (hasException(testCase)) {
                // An invalid flag definition must have been rejected during binding.
                assertNull(managementProperties.getOnOff().get(testCase.getFeatureFlagName()));
                continue;
            }
            if (hasInput(testCase)) {
                final Object userObj = testCase.getInputs().get(INPUTS_USER);
                final Object groupsObj = testCase.getInputs().get(INPUTS_GROUPS);
                final String user = userObj != null ? userObj.toString() : null;
                @SuppressWarnings("unchecked")
                final List<String> groups = groupsObj != null ? (List<String>) groupsObj : null;
                when(context.getBean(Mockito.contains("Targeting")))
                    .thenReturn(new TargetingFilter(new TargetingFilterTestContextAccessor(user, groups)));
            }
            final Boolean result = featureManager.isEnabled(testCase.getFeatureFlagName());
            // JUnit convention: expected value first, actual value second.
            assertEquals(testCase.getIsEnabled().getResult(), result.toString());
        }
    }
}
Shouldn't we have repurposed this runner for `file_search` instead? I believe the tool definition was only renamed but is functionally nearly identical.
/**
 * Builds the assistant and thread creation options used by the function-tool-call
 * tests and hands both to the supplied test runner.
 */
void createFunctionToolCallRunner(BiConsumer<AssistantCreationOptions, AssistantThreadCreationOptions> testRunner) {
    // The three travel-themed function tools the assistant may invoke.
    FunctionsToolCallHelper helper = new FunctionsToolCallHelper();
    List<ToolDefinition> functionTools = Arrays.asList(
        helper.getAirlinePriceToDestinationForSeasonDefinition(),
        helper.getFavoriteVacationDestinationDefinition(),
        helper.getPreferredAirlineForSeasonDefinition());

    AssistantCreationOptions creationOptions = new AssistantCreationOptions(GPT_4_1106_PREVIEW)
        .setName("Java SDK Function Tool Call Test")
        .setInstructions("You are a helpful assistant that can help fetch data from files you know about.")
        .setTools(functionTools);

    // The thread starts empty; individual tests add their own messages.
    testRunner.accept(creationOptions, new AssistantThreadCreationOptions());
}
FunctionsToolCallHelper functionsToolCallHelper = new FunctionsToolCallHelper();
/**
 * Prepares the creation options for the function-tool-call tests: an assistant
 * configured with three function tool definitions, plus an empty thread, both
 * passed to the given test runner.
 */
void createFunctionToolCallRunner(BiConsumer<AssistantCreationOptions, AssistantThreadCreationOptions> testRunner) {
    FunctionsToolCallHelper functionsToolCallHelper = new FunctionsToolCallHelper();
    // The function tools the assistant is allowed to call during the test.
    List<ToolDefinition> toolDefinition = Arrays.asList(
        functionsToolCallHelper.getAirlinePriceToDestinationForSeasonDefinition(),
        functionsToolCallHelper.getFavoriteVacationDestinationDefinition(),
        functionsToolCallHelper.getPreferredAirlineForSeasonDefinition()
    );
    AssistantCreationOptions assistantOptions = new AssistantCreationOptions(GPT_4_1106_PREVIEW)
        .setName("Java SDK Function Tool Call Test")
        .setInstructions("You are a helpful assistant that can help fetch data from files you know about.")
        .setTools(toolDefinition);
    // No initial messages: each test supplies its own conversation.
    AssistantThreadCreationOptions threadCreationOptions = new AssistantThreadCreationOptions();
    testRunner.accept(assistantOptions, threadCreationOptions);
}
class AssistantsClientTestBase extends TestProxyTestBase { private static final String[] REMOVE_SANITIZER_ID = {"AZSDK3430", "AZSDK3493", "AZSDK2015"}; private static final String JAVA_SDK_TESTS_ASSISTANTS_TXT = "java_sdk_tests_assistants.txt"; private static final String JAVA_SDK_TESTS_FINE_TUNING_JSON = "java_sdk_tests_fine_tuning.json"; private static final String MS_LOGO_PNG = "ms_logo.png"; AssistantsAsyncClient getAssistantsAsyncClient(HttpClient httpClient) { return getAssistantsClientBuilder(buildAssertingClient( interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient, false)) .buildAsyncClient(); } AssistantsAsyncClient getAssistantsAsyncClient(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { return getAzureAssistantsClientBuilder(buildAssertingClient( interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient, false), serviceVersion) .buildAsyncClient(); } AssistantsClient getAssistantsClient(HttpClient httpClient) { return getAssistantsClientBuilder(buildAssertingClient( interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient, true)) .buildClient(); } AssistantsClient getAssistantsClient(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { return getAzureAssistantsClientBuilder(buildAssertingClient( interceptorManager.isPlaybackMode() ? 
interceptorManager.getPlaybackClient() : httpClient, true), serviceVersion) .buildClient(); } AssistantsClientBuilder getAzureAssistantsClientBuilder(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { AssistantsClientBuilder builder = new AssistantsClientBuilder() .httpClient(httpClient) .serviceVersion(serviceVersion); if (getTestMode() == TestMode.PLAYBACK) { builder .endpoint("https: .credential(new AzureKeyCredential(TestUtils.FAKE_API_KEY)); } else if (getTestMode() == TestMode.RECORD) { builder .addPolicy(interceptorManager.getRecordPolicy()) .endpoint(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_ENDPOINT")) .credential(new AzureKeyCredential(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_KEY"))); } else { builder .endpoint(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_ENDPOINT")) .credential(new AzureKeyCredential(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_KEY"))); } if (getTestMode() != TestMode.LIVE) { addTestRecordCustomSanitizers(); addCustomMatchers(); removeDefaultSanitizers(); } return builder; } AssistantsClientBuilder getAssistantsClientBuilder(HttpClient httpClient) { AssistantsClientBuilder builder = new AssistantsClientBuilder() .httpClient(httpClient); if (getTestMode() == TestMode.PLAYBACK) { builder.credential(new KeyCredential(TestUtils.FAKE_API_KEY)); } else if (getTestMode() == TestMode.RECORD) { builder .addPolicy(interceptorManager.getRecordPolicy()) .credential(new KeyCredential(Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY"))); } else { builder.credential(new KeyCredential(Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY"))); } if (getTestMode() != TestMode.LIVE) { addTestRecordCustomSanitizers(); addCustomMatchers(); removeDefaultSanitizers(); } return builder; } private void addTestRecordCustomSanitizers() { interceptorManager.addSanitizers(Arrays.asList( new TestProxySanitizer("$..key", null, "REDACTED", TestProxySanitizerType.BODY_KEY), 
new TestProxySanitizer("$..endpoint", null, "https: new TestProxySanitizer("Content-Type", "(^multipart\\/form-data; boundary=[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{2})", "multipart\\/form-data; boundary=BOUNDARY", TestProxySanitizerType.HEADER) )); } private void addCustomMatchers() { interceptorManager.addMatchers(new CustomMatcher().setHeadersKeyOnlyMatch(Arrays.asList("Cookie", "Set-Cookie"))); } private void removeDefaultSanitizers() { interceptorManager.removeSanitizers(REMOVE_SANITIZER_ID); } public static final String GPT_4_1106_PREVIEW = "gpt-4-1106-preview"; void createAssistantsRunner(Consumer<AssistantCreationOptions> testRunner) { testRunner.accept(new AssistantCreationOptions(GPT_4_1106_PREVIEW) .setName("Math Tutor") .setInstructions("You are a personal math tutor. Answer questions briefly, in a sentence or less.") .setTools(Arrays.asList(new CodeInterpreterToolDefinition()))); } void createRunRunner(Consumer<AssistantThreadCreationOptions> testRunner) { testRunner.accept(new AssistantThreadCreationOptions() .setMessages(Arrays.asList(new ThreadMessageOptions(MessageRole.USER, "I need to solve the equation `3x + 11 = 14`. Can you help me?")))); } void createMessageRunner(Consumer<String> testRunner) { testRunner.accept("I need to solve the equation `3x + 11 = 14`. Can you help me?"); } void submitMessageAndRunRunner(Consumer<String> testRunner) { testRunner.accept("I need to solve the equation `3x + 11 = 14`. Can you help me?"); } void createThreadAndRunRunner(Consumer<CreateAndRunThreadOptions> testRunner, String assistantId) { testRunner.accept( new CreateAndRunThreadOptions(assistantId) .setThread(new AssistantThreadCreationOptions() .setMessages(Arrays.asList(new ThreadMessageOptions(MessageRole.USER, "I need to solve the equation `3x + 11 = 14`. 
Can you help me?"))))); } void createThreadRunWithFunctionCallRunner(Consumer<CreateAndRunThreadOptions> testRunner, String assistantId) { testRunner.accept( new CreateAndRunThreadOptions(assistantId) .setThread(new AssistantThreadCreationOptions() .setMessages(Arrays.asList(new ThreadMessageOptions(MessageRole.USER, "Please make a graph for my boilerplate equation"))))); } void createRunRunner(Consumer<CreateRunOptions> testRunner, String assistantId) { testRunner.accept(new CreateRunOptions(assistantId)); } void uploadAssistantTextFileRunner(BiConsumer<FileDetails, FilePurpose> testRunner) { String fileName = JAVA_SDK_TESTS_ASSISTANTS_TXT; FileDetails fileDetails = new FileDetails(BinaryData.fromFile(openResourceFile(fileName)), fileName); testRunner.accept(fileDetails, FilePurpose.ASSISTANTS); } void uploadAssistantImageFileRunner(BiConsumer<FileDetails, FilePurpose> testRunner) { String fileName = MS_LOGO_PNG; FileDetails fileDetails = new FileDetails(BinaryData.fromFile(openResourceFile(fileName)), fileName); testRunner.accept(fileDetails, FilePurpose.ASSISTANTS); } void uploadFineTuningJsonFileRunner(BiConsumer<FileDetails, FilePurpose> testRunner) { String fileName = JAVA_SDK_TESTS_FINE_TUNING_JSON; FileDetails fileDetails = new FileDetails(BinaryData.fromFile(openResourceFile(fileName)), fileName); testRunner.accept(fileDetails, FilePurpose.FINE_TUNE); } void modifyVectorStoreRunner(Consumer<VectorStoreUpdateOptions> testRunner) { VectorStoreUpdateOptions updateVectorStoreOptions = new VectorStoreUpdateOptions() .setName("updatedName"); testRunner.accept(updateVectorStoreOptions); } public HttpClient buildAssertingClient(HttpClient httpClient, boolean sync) { AssertingHttpClientBuilder builder = new AssertingHttpClientBuilder(httpClient) .skipRequest((ignored1, ignored2) -> false); if (sync) { builder.assertSync(); } else { builder.assertAsync(); } return builder.build(); } static <T> T assertAndGetValueFromResponse(Response<BinaryData> actualResponse, 
Class<T> clazz, int expectedCode) { assertNotNull(actualResponse); assertEquals(expectedCode, actualResponse.getStatusCode()); assertInstanceOf(Response.class, actualResponse); BinaryData binaryData = actualResponse.getValue(); assertNotNull(binaryData); T object = binaryData.toObject(clazz); assertNotNull(object); assertInstanceOf(clazz, object); return object; } static <T> PageableList<T> asserAndGetPageableListFromResponse(Response<BinaryData> actualResponse, int expectedCode, CheckedFunction<JsonReader, List<T>> readListFunction) { assertNotNull(actualResponse); assertEquals(expectedCode, actualResponse.getStatusCode()); assertInstanceOf(Response.class, actualResponse); BinaryData binaryData = actualResponse.getValue(); assertNotNull(binaryData); PageableList<T> object = null; try { object = PageableListAccessHelper.create(binaryData, readListFunction); } catch (IOException e) { throw new RuntimeException(e); } assertNotNull(object); return object; } protected interface CheckedFunction<T, R> extends Function<T, R> { @Override default R apply(T t) { try { return applyThrows(t); } catch (Exception e) { throw new RuntimeException(e); } } R applyThrows(T t) throws Exception; } protected static void assertFileEquals(OpenAIFile expected, OpenAIFile actual) { assertEquals(expected.getId(), actual.getId()); assertEquals(expected.getFilename(), actual.getFilename()); assertEquals(expected.getBytes(), actual.getBytes()); assertEquals(expected.getPurpose(), actual.getPurpose()); assertEquals(expected.getCreatedAt(), actual.getCreatedAt()); } public static void assertStreamUpdate(StreamUpdate event) { assertNotNull(event); assertNotNull(event.getKind()); assertTrue(AssistantStreamEvent.values().contains(event.getKind())); } public static Path openResourceFile(String fileName) { return Paths.get("src", "test", "resources", fileName); } String createMathTutorAssistant(AssistantsClient client) { AssistantCreationOptions assistantCreationOptions = new 
AssistantCreationOptions(GPT_4_1106_PREVIEW) .setName("Math Tutor") .setInstructions("You are a personal math tutor. Answer questions briefly, in a sentence or less.") .setTools(Arrays.asList(new CodeInterpreterToolDefinition())); return createAssistant(client, assistantCreationOptions); } String createMathTutorAssistantWithFunctionTool(AssistantsClient client) { AssistantCreationOptions assistantCreationOptions = new AssistantCreationOptions(GPT_4_1106_PREVIEW) .setName("Math Tutor") .setInstructions("You are a helpful math assistant that helps with visualizing equations. Use the code " + "interpreter tool when asked to generate images. Use provided functions to resolve appropriate unknown values") .setTools(Arrays.asList( new CodeInterpreterToolDefinition(), new FunctionToolDefinition( new FunctionDefinition("get_boilerplate_equation", BinaryData.fromString("{\"type\":\"object\",\"properties\":{}}")) .setDescription("Retrieves a predefined 'boilerplate equation' from the caller") ))); return createAssistant(client, assistantCreationOptions); } String createMathTutorAssistantWithFunctionTool(AssistantsAsyncClient client) { AssistantCreationOptions assistantCreationOptions = new AssistantCreationOptions(GPT_4_1106_PREVIEW) .setName("Math Tutor") .setInstructions("You are a helpful math assistant that helps with visualizing equations. Use the code " + "interpreter tool when asked to generate images. 
Use provided functions to resolve appropriate unknown values") .setTools(Arrays.asList( new CodeInterpreterToolDefinition(), new FunctionToolDefinition( new FunctionDefinition("get_boilerplate_equation", BinaryData.fromString("{\"type\":\"object\",\"properties\":{}}")) .setDescription("Retrieves a predefined 'boilerplate equation' from the caller") ))); return createAssistant(client, assistantCreationOptions); } String createMathTutorAssistant(AssistantsAsyncClient client) { AssistantCreationOptions assistantCreationOptions = new AssistantCreationOptions(GPT_4_1106_PREVIEW) .setName("Math Tutor") .setInstructions("You are a personal math tutor. Answer questions briefly, in a sentence or less.") .setTools(Arrays.asList(new CodeInterpreterToolDefinition())); return createAssistant(client, assistantCreationOptions); } String uploadFile(AssistantsClient client, String fileName, FilePurpose filePurpose) { FileDetails fileDetails = new FileDetails(BinaryData.fromFile(openResourceFile(fileName)), fileName); OpenAIFile openAIFile = client.uploadFile( fileDetails, filePurpose); assertNotNull(openAIFile.getId()); assertNotNull(openAIFile.getCreatedAt()); return openAIFile.getId(); } String uploadFileAsync(AssistantsAsyncClient client, String fileName, FilePurpose filePurpose) { FileDetails fileDetails = new FileDetails(BinaryData.fromFile(openResourceFile(fileName)), fileName); OpenAIFile openAIFile = client.uploadFile( fileDetails, filePurpose).block(); assertNotNull(openAIFile.getId()); assertNotNull(openAIFile.getCreatedAt()); return openAIFile.getId(); } void deleteFiles(AssistantsClient client, String... fileIds) { if (CoreUtils.isNullOrEmpty(fileIds)) { return; } for (String fileId : fileIds) { FileDeletionStatus deletionStatus = client.deleteFile(fileId); assertEquals(fileId, deletionStatus.getId()); assertTrue(deletionStatus.isDeleted()); } } void deleteFilesAsync(AssistantsAsyncClient client, String... 
fileIds) { if (CoreUtils.isNullOrEmpty(fileIds)) { return; } for (String fileId : fileIds) { StepVerifier.create(client.deleteFile(fileId)) .assertNext(deletionStatus -> { assertEquals(fileId, deletionStatus.getId()); assertTrue(deletionStatus.isDeleted()); }) .verifyComplete(); } } void deleteVectorStores(AssistantsClient client, String... vectorStoreIds) { if (!CoreUtils.isNullOrEmpty(vectorStoreIds)) { for (String vectorStoreId : vectorStoreIds) { VectorStoreDeletionStatus vectorStoreDeletionStatus = client.deleteVectorStore(vectorStoreId); assertTrue(vectorStoreDeletionStatus.isDeleted()); } } } void deleteVectorStoresAsync(AssistantsAsyncClient client, String... vectorStoreIds) { if (!CoreUtils.isNullOrEmpty(vectorStoreIds)) { for (String vectorStoreId : vectorStoreIds) { StepVerifier.create(client.deleteVectorStore(vectorStoreId)) .assertNext(vectorStoreDeletionStatus -> { assertTrue(vectorStoreDeletionStatus.isDeleted()); }) .verifyComplete(); } } } String createAssistant(AssistantsClient client, AssistantCreationOptions assistantCreationOptions) { Assistant assistant = client.createAssistant(assistantCreationOptions); assertEquals(assistantCreationOptions.getName(), assistant.getName()); assertEquals(assistantCreationOptions.getDescription(), assistant.getDescription()); assertEquals(assistantCreationOptions.getInstructions(), assistant.getInstructions()); return assistant.getId(); } String createAssistant(AssistantsAsyncClient client, AssistantCreationOptions assistantCreationOptions) { Assistant assistant = client.createAssistant(assistantCreationOptions).block(); assertNotNull(assistant); assertEquals(assistantCreationOptions.getName(), assistant.getName()); assertEquals(assistantCreationOptions.getDescription(), assistant.getDescription()); assertEquals(assistantCreationOptions.getInstructions(), assistant.getInstructions()); return assistant.getId(); } void deleteAssistant(AssistantsClient client, String assistantId) { if 
(CoreUtils.isNullOrEmpty(assistantId)) { return; } AssistantDeletionStatus deletionStatus = client.deleteAssistant(assistantId); assertEquals(assistantId, deletionStatus.getId()); assertTrue(deletionStatus.isDeleted()); } void deleteAssistant(AssistantsAsyncClient client, String assistantId) { if (CoreUtils.isNullOrEmpty(assistantId)) { return; } StepVerifier.create(client.deleteAssistant(assistantId)) .assertNext(deletionStatus -> { assertEquals(assistantId, deletionStatus.getId()); assertTrue(deletionStatus.isDeleted()); }) .verifyComplete(); } String createThread(AssistantsAsyncClient client) { AtomicReference<String> threadIdRef = new AtomicReference<>(); StepVerifier.create(client.createThread(new AssistantThreadCreationOptions())) .assertNext(assistantThread -> { assertNotNull(assistantThread.getId()); assertNotNull(assistantThread.getCreatedAt()); assertEquals("thread", assistantThread.getObject()); threadIdRef.set(assistantThread.getId()); }) .verifyComplete(); return threadIdRef.get(); } String createThread(AssistantsClient client) { AssistantThread assistantThread = client.createThread(new AssistantThreadCreationOptions()); assertNotNull(assistantThread.getId()); assertNotNull(assistantThread.getCreatedAt()); assertEquals("thread", assistantThread.getObject()); return assistantThread.getId(); } void deleteThread(AssistantsAsyncClient client, String threadId) { if (CoreUtils.isNullOrEmpty(threadId)) { return; } StepVerifier.create(client.deleteThread(threadId)) .assertNext(deletionStatus -> { assertEquals(threadId, deletionStatus.getId()); assertTrue(deletionStatus.isDeleted()); }) .verifyComplete(); } void deleteThread(AssistantsClient client, String threadId) { if (CoreUtils.isNullOrEmpty(threadId)) { return; } ThreadDeletionStatus threadDeletionStatus = client.deleteThread(threadId); assertEquals(threadId, threadDeletionStatus.getId()); assertTrue(threadDeletionStatus.isDeleted()); } ThreadRun createThreadAndRun(AssistantsAsyncClient client, 
CreateAndRunThreadOptions options) { AtomicReference<ThreadRun> threadRunRef = new AtomicReference<>(); StepVerifier.create(client.createThreadAndRun(options)) .assertNext(run -> { assertNotNull(run.getId()); assertNotNull(run.getCreatedAt()); assertEquals("thread.run", run.getObject()); assertNotNull(run.getInstructions()); threadRunRef.set(run); }) .verifyComplete(); return threadRunRef.get(); } ThreadRun createThreadAndRun(AssistantsClient client, CreateAndRunThreadOptions options) { ThreadRun run = client.createThreadAndRun(options); assertNotNull(run.getId()); assertNotNull(run.getCreatedAt()); assertEquals("thread.run", run.getObject()); assertNotNull(run.getInstructions()); return run; } void validateThreadRun(ThreadRun expect, ThreadRun actual) { assertEquals(expect.getId(), actual.getId()); assertEquals(expect.getThreadId(), actual.getThreadId()); assertEquals(expect.getAssistantId(), actual.getAssistantId()); assertEquals(expect.getCreatedAt(), actual.getCreatedAt()); assertEquals(expect.getCompletedAt(), actual.getCompletedAt()); assertEquals(expect.getInstructions(), actual.getInstructions()); assertEquals(expect.getObject(), actual.getObject()); assertEquals(expect.getModel(), actual.getModel()); } void validateRunStep(RunStep expect, RunStep actual) { assertEquals(expect.getId(), actual.getId()); assertEquals(expect.getRunId(), actual.getRunId()); assertEquals(expect.getThreadId(), actual.getThreadId()); assertEquals(expect.getAssistantId(), actual.getAssistantId()); assertEquals(expect.getObject(), actual.getObject()); assertEquals(expect.getType(), actual.getType()); } void validateThreadMessage(ThreadMessage threadMessage, String threadId) { String threadMessageId = threadMessage.getId(); assertNotNull(threadMessageId); assertEquals(threadId, threadMessage.getThreadId()); assertNotNull(threadMessage.getCreatedAt()); assertEquals("thread.message", threadMessage.getObject()); assertEquals(MessageRole.USER, threadMessage.getRole()); 
assertFalse(threadMessage.getContent().isEmpty()); } }
/**
 * Shared test base for the OpenAI Assistants clients (sync and async).
 *
 * <p>Responsibilities visible in this class:
 * <ul>
 *   <li>Builds {@code AssistantsClient} / {@code AssistantsAsyncClient} instances, routing through the
 *       interceptor manager's playback client when in PLAYBACK mode and wrapping the transport in an
 *       asserting client (sync vs. async enforced by the {@code sync} flag).</li>
 *   <li>Configures endpoint/credential per {@code TestMode}: fake key for PLAYBACK, recorded policy plus
 *       environment-supplied endpoint/key for RECORD, environment values alone for LIVE. For non-LIVE
 *       modes it also installs custom sanitizers/matchers and removes the default sanitizers listed in
 *       {@code REMOVE_SANITIZER_ID}.</li>
 *   <li>Provides "runner" helpers that hand pre-built options objects to test lambdas, plus create/upload/
 *       delete helpers (assistants, threads, files, vector stores) and assertion helpers for responses,
 *       pageable lists, files, and stream updates.</li>
 * </ul>
 *
 * <p>NOTE(review): this block was recovered from a collapsed/extracted dump. Several string literals are
 * visibly truncated ("https:" endpoints and one sanitizer replacement value) — these look like extraction
 * artifacts, not real code; confirm against the repository before relying on them.
 * NOTE(review): {@code asserAndGetPageableListFromResponse} is misspelled (missing 't'); renaming would
 * change the interface, so it is only flagged here.
 * NOTE(review): the sync {@code createAssistant} overload does not null-check the returned assistant while
 * the async overload does ({@code assertNotNull(assistant)}) — presumably intentional since the sync call
 * cannot return null; verify.
 */
class AssistantsClientTestBase extends TestProxyTestBase { private static final String[] REMOVE_SANITIZER_ID = {"AZSDK3430", "AZSDK3493", "AZSDK2015"}; private static final String JAVA_SDK_TESTS_ASSISTANTS_TXT = "java_sdk_tests_assistants.txt"; private static final String JAVA_SDK_TESTS_FINE_TUNING_JSON = "java_sdk_tests_fine_tuning.json"; private static final String MS_LOGO_PNG = "ms_logo.png"; AssistantsAsyncClient getAssistantsAsyncClient(HttpClient httpClient) { return getAssistantsClientBuilder(buildAssertingClient( interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient, false)) .buildAsyncClient(); } AssistantsAsyncClient getAssistantsAsyncClient(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { return getAzureAssistantsClientBuilder(buildAssertingClient( interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient, false), serviceVersion) .buildAsyncClient(); } AssistantsClient getAssistantsClient(HttpClient httpClient) { return getAssistantsClientBuilder(buildAssertingClient( interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient, true)) .buildClient(); } AssistantsClient getAssistantsClient(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { return getAzureAssistantsClientBuilder(buildAssertingClient( interceptorManager.isPlaybackMode() ?
interceptorManager.getPlaybackClient() : httpClient, true), serviceVersion) .buildClient(); } AssistantsClientBuilder getAzureAssistantsClientBuilder(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { AssistantsClientBuilder builder = new AssistantsClientBuilder() .httpClient(httpClient) .serviceVersion(serviceVersion); if (getTestMode() == TestMode.PLAYBACK) { builder .endpoint("https: .credential(new AzureKeyCredential(TestUtils.FAKE_API_KEY)); } else if (getTestMode() == TestMode.RECORD) { builder .addPolicy(interceptorManager.getRecordPolicy()) .endpoint(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_ENDPOINT")) .credential(new AzureKeyCredential(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_KEY"))); } else { builder .endpoint(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_ENDPOINT")) .credential(new AzureKeyCredential(Configuration.getGlobalConfiguration().get("AZURE_OPENAI_KEY"))); } if (getTestMode() != TestMode.LIVE) { addTestRecordCustomSanitizers(); addCustomMatchers(); removeDefaultSanitizers(); } return builder; } AssistantsClientBuilder getAssistantsClientBuilder(HttpClient httpClient) { AssistantsClientBuilder builder = new AssistantsClientBuilder() .httpClient(httpClient); if (getTestMode() == TestMode.PLAYBACK) { builder.credential(new KeyCredential(TestUtils.FAKE_API_KEY)); } else if (getTestMode() == TestMode.RECORD) { builder .addPolicy(interceptorManager.getRecordPolicy()) .credential(new KeyCredential(Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY"))); } else { builder.credential(new KeyCredential(Configuration.getGlobalConfiguration().get("NON_AZURE_OPENAI_KEY"))); } if (getTestMode() != TestMode.LIVE) { addTestRecordCustomSanitizers(); addCustomMatchers(); removeDefaultSanitizers(); } return builder; } private void addTestRecordCustomSanitizers() { interceptorManager.addSanitizers(Arrays.asList( new TestProxySanitizer("$..key", null, "REDACTED", TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..endpoint", null, "https: new TestProxySanitizer("Content-Type", "(^multipart\\/form-data; boundary=[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{2})", "multipart\\/form-data; boundary=BOUNDARY", TestProxySanitizerType.HEADER) )); } private void addCustomMatchers() { interceptorManager.addMatchers(new CustomMatcher().setHeadersKeyOnlyMatch(Arrays.asList("Cookie", "Set-Cookie"))); } private void removeDefaultSanitizers() { interceptorManager.removeSanitizers(REMOVE_SANITIZER_ID); } public static final String GPT_4_1106_PREVIEW = "gpt-4-1106-preview"; void createAssistantsRunner(Consumer<AssistantCreationOptions> testRunner) { testRunner.accept(new AssistantCreationOptions(GPT_4_1106_PREVIEW) .setName("Math Tutor") .setInstructions("You are a personal math tutor. Answer questions briefly, in a sentence or less.") .setTools(Arrays.asList(new CodeInterpreterToolDefinition()))); } void createRunRunner(Consumer<AssistantThreadCreationOptions> testRunner) { testRunner.accept(new AssistantThreadCreationOptions() .setMessages(Arrays.asList(new ThreadMessageOptions(MessageRole.USER, "I need to solve the equation `3x + 11 = 14`. Can you help me?")))); } void createMessageRunner(Consumer<String> testRunner) { testRunner.accept("I need to solve the equation `3x + 11 = 14`. Can you help me?"); } void submitMessageAndRunRunner(Consumer<String> testRunner) { testRunner.accept("I need to solve the equation `3x + 11 = 14`. Can you help me?"); } void createThreadAndRunRunner(Consumer<CreateAndRunThreadOptions> testRunner, String assistantId) { testRunner.accept( new CreateAndRunThreadOptions(assistantId) .setThread(new AssistantThreadCreationOptions() .setMessages(Arrays.asList(new ThreadMessageOptions(MessageRole.USER, "I need to solve the equation `3x + 11 = 14`.
Can you help me?"))))); } void createThreadRunWithFunctionCallRunner(Consumer<CreateAndRunThreadOptions> testRunner, String assistantId) { testRunner.accept( new CreateAndRunThreadOptions(assistantId) .setThread(new AssistantThreadCreationOptions() .setMessages(Arrays.asList(new ThreadMessageOptions(MessageRole.USER, "Please make a graph for my boilerplate equation"))))); } void createRunRunner(Consumer<CreateRunOptions> testRunner, String assistantId) { testRunner.accept(new CreateRunOptions(assistantId)); } void uploadAssistantTextFileRunner(BiConsumer<FileDetails, FilePurpose> testRunner) { String fileName = JAVA_SDK_TESTS_ASSISTANTS_TXT; FileDetails fileDetails = new FileDetails(BinaryData.fromFile(openResourceFile(fileName)), fileName); testRunner.accept(fileDetails, FilePurpose.ASSISTANTS); } void uploadAssistantImageFileRunner(BiConsumer<FileDetails, FilePurpose> testRunner) { String fileName = MS_LOGO_PNG; FileDetails fileDetails = new FileDetails(BinaryData.fromFile(openResourceFile(fileName)), fileName); testRunner.accept(fileDetails, FilePurpose.ASSISTANTS); } void uploadFineTuningJsonFileRunner(BiConsumer<FileDetails, FilePurpose> testRunner) { String fileName = JAVA_SDK_TESTS_FINE_TUNING_JSON; FileDetails fileDetails = new FileDetails(BinaryData.fromFile(openResourceFile(fileName)), fileName); testRunner.accept(fileDetails, FilePurpose.FINE_TUNE); } void modifyVectorStoreRunner(Consumer<VectorStoreUpdateOptions> testRunner) { VectorStoreUpdateOptions updateVectorStoreOptions = new VectorStoreUpdateOptions() .setName("updatedName"); testRunner.accept(updateVectorStoreOptions); } public HttpClient buildAssertingClient(HttpClient httpClient, boolean sync) { AssertingHttpClientBuilder builder = new AssertingHttpClientBuilder(httpClient) .skipRequest((ignored1, ignored2) -> false); if (sync) { builder.assertSync(); } else { builder.assertAsync(); } return builder.build(); } static <T> T assertAndGetValueFromResponse(Response<BinaryData> actualResponse,
Class<T> clazz, int expectedCode) { assertNotNull(actualResponse); assertEquals(expectedCode, actualResponse.getStatusCode()); assertInstanceOf(Response.class, actualResponse); BinaryData binaryData = actualResponse.getValue(); assertNotNull(binaryData); T object = binaryData.toObject(clazz); assertNotNull(object); assertInstanceOf(clazz, object); return object; } static <T> PageableList<T> asserAndGetPageableListFromResponse(Response<BinaryData> actualResponse, int expectedCode, CheckedFunction<JsonReader, List<T>> readListFunction) { assertNotNull(actualResponse); assertEquals(expectedCode, actualResponse.getStatusCode()); assertInstanceOf(Response.class, actualResponse); BinaryData binaryData = actualResponse.getValue(); assertNotNull(binaryData); PageableList<T> object = null; try { object = PageableListAccessHelper.create(binaryData, readListFunction); } catch (IOException e) { throw new RuntimeException(e); } assertNotNull(object); return object; } protected interface CheckedFunction<T, R> extends Function<T, R> { @Override default R apply(T t) { try { return applyThrows(t); } catch (Exception e) { throw new RuntimeException(e); } } R applyThrows(T t) throws Exception; } protected static void assertFileEquals(OpenAIFile expected, OpenAIFile actual) { assertEquals(expected.getId(), actual.getId()); assertEquals(expected.getFilename(), actual.getFilename()); assertEquals(expected.getBytes(), actual.getBytes()); assertEquals(expected.getPurpose(), actual.getPurpose()); assertEquals(expected.getCreatedAt(), actual.getCreatedAt()); } public static void assertStreamUpdate(StreamUpdate event) { assertNotNull(event); assertNotNull(event.getKind()); assertTrue(AssistantStreamEvent.values().contains(event.getKind())); } public static Path openResourceFile(String fileName) { return Paths.get("src", "test", "resources", fileName); } String createMathTutorAssistant(AssistantsClient client) { AssistantCreationOptions assistantCreationOptions = new
AssistantCreationOptions(GPT_4_1106_PREVIEW) .setName("Math Tutor") .setInstructions("You are a personal math tutor. Answer questions briefly, in a sentence or less.") .setTools(Arrays.asList(new CodeInterpreterToolDefinition())); return createAssistant(client, assistantCreationOptions); } String createMathTutorAssistantWithFunctionTool(AssistantsClient client) { AssistantCreationOptions assistantCreationOptions = new AssistantCreationOptions(GPT_4_1106_PREVIEW) .setName("Math Tutor") .setInstructions("You are a helpful math assistant that helps with visualizing equations. Use the code " + "interpreter tool when asked to generate images. Use provided functions to resolve appropriate unknown values") .setTools(Arrays.asList( new CodeInterpreterToolDefinition(), new FunctionToolDefinition( new FunctionDefinition("get_boilerplate_equation", BinaryData.fromString("{\"type\":\"object\",\"properties\":{}}")) .setDescription("Retrieves a predefined 'boilerplate equation' from the caller") ))); return createAssistant(client, assistantCreationOptions); } String createMathTutorAssistantWithFunctionTool(AssistantsAsyncClient client) { AssistantCreationOptions assistantCreationOptions = new AssistantCreationOptions(GPT_4_1106_PREVIEW) .setName("Math Tutor") .setInstructions("You are a helpful math assistant that helps with visualizing equations. Use the code " + "interpreter tool when asked to generate images.
Use provided functions to resolve appropriate unknown values") .setTools(Arrays.asList( new CodeInterpreterToolDefinition(), new FunctionToolDefinition( new FunctionDefinition("get_boilerplate_equation", BinaryData.fromString("{\"type\":\"object\",\"properties\":{}}")) .setDescription("Retrieves a predefined 'boilerplate equation' from the caller") ))); return createAssistant(client, assistantCreationOptions); } String createMathTutorAssistant(AssistantsAsyncClient client) { AssistantCreationOptions assistantCreationOptions = new AssistantCreationOptions(GPT_4_1106_PREVIEW) .setName("Math Tutor") .setInstructions("You are a personal math tutor. Answer questions briefly, in a sentence or less.") .setTools(Arrays.asList(new CodeInterpreterToolDefinition())); return createAssistant(client, assistantCreationOptions); } String uploadFile(AssistantsClient client, String fileName, FilePurpose filePurpose) { FileDetails fileDetails = new FileDetails(BinaryData.fromFile(openResourceFile(fileName)), fileName); OpenAIFile openAIFile = client.uploadFile( fileDetails, filePurpose); assertNotNull(openAIFile.getId()); assertNotNull(openAIFile.getCreatedAt()); return openAIFile.getId(); } String uploadFileAsync(AssistantsAsyncClient client, String fileName, FilePurpose filePurpose) { FileDetails fileDetails = new FileDetails(BinaryData.fromFile(openResourceFile(fileName)), fileName); OpenAIFile openAIFile = client.uploadFile( fileDetails, filePurpose).block(); assertNotNull(openAIFile.getId()); assertNotNull(openAIFile.getCreatedAt()); return openAIFile.getId(); } void deleteFiles(AssistantsClient client, String... fileIds) { if (CoreUtils.isNullOrEmpty(fileIds)) { return; } for (String fileId : fileIds) { FileDeletionStatus deletionStatus = client.deleteFile(fileId); assertEquals(fileId, deletionStatus.getId()); assertTrue(deletionStatus.isDeleted()); } } void deleteFilesAsync(AssistantsAsyncClient client, String...
fileIds) { if (CoreUtils.isNullOrEmpty(fileIds)) { return; } for (String fileId : fileIds) { StepVerifier.create(client.deleteFile(fileId)) .assertNext(deletionStatus -> { assertEquals(fileId, deletionStatus.getId()); assertTrue(deletionStatus.isDeleted()); }) .verifyComplete(); } } void deleteVectorStores(AssistantsClient client, String... vectorStoreIds) { if (!CoreUtils.isNullOrEmpty(vectorStoreIds)) { for (String vectorStoreId : vectorStoreIds) { VectorStoreDeletionStatus vectorStoreDeletionStatus = client.deleteVectorStore(vectorStoreId); assertTrue(vectorStoreDeletionStatus.isDeleted()); } } } void deleteVectorStoresAsync(AssistantsAsyncClient client, String... vectorStoreIds) { if (!CoreUtils.isNullOrEmpty(vectorStoreIds)) { for (String vectorStoreId : vectorStoreIds) { StepVerifier.create(client.deleteVectorStore(vectorStoreId)) .assertNext(vectorStoreDeletionStatus -> { assertTrue(vectorStoreDeletionStatus.isDeleted()); }) .verifyComplete(); } } } String createAssistant(AssistantsClient client, AssistantCreationOptions assistantCreationOptions) { Assistant assistant = client.createAssistant(assistantCreationOptions); assertEquals(assistantCreationOptions.getName(), assistant.getName()); assertEquals(assistantCreationOptions.getDescription(), assistant.getDescription()); assertEquals(assistantCreationOptions.getInstructions(), assistant.getInstructions()); return assistant.getId(); } String createAssistant(AssistantsAsyncClient client, AssistantCreationOptions assistantCreationOptions) { Assistant assistant = client.createAssistant(assistantCreationOptions).block(); assertNotNull(assistant); assertEquals(assistantCreationOptions.getName(), assistant.getName()); assertEquals(assistantCreationOptions.getDescription(), assistant.getDescription()); assertEquals(assistantCreationOptions.getInstructions(), assistant.getInstructions()); return assistant.getId(); } void deleteAssistant(AssistantsClient client, String assistantId) { if
(CoreUtils.isNullOrEmpty(assistantId)) { return; } AssistantDeletionStatus deletionStatus = client.deleteAssistant(assistantId); assertEquals(assistantId, deletionStatus.getId()); assertTrue(deletionStatus.isDeleted()); } void deleteAssistant(AssistantsAsyncClient client, String assistantId) { if (CoreUtils.isNullOrEmpty(assistantId)) { return; } StepVerifier.create(client.deleteAssistant(assistantId)) .assertNext(deletionStatus -> { assertEquals(assistantId, deletionStatus.getId()); assertTrue(deletionStatus.isDeleted()); }) .verifyComplete(); } String createThread(AssistantsAsyncClient client) { AtomicReference<String> threadIdRef = new AtomicReference<>(); StepVerifier.create(client.createThread(new AssistantThreadCreationOptions())) .assertNext(assistantThread -> { assertNotNull(assistantThread.getId()); assertNotNull(assistantThread.getCreatedAt()); assertEquals("thread", assistantThread.getObject()); threadIdRef.set(assistantThread.getId()); }) .verifyComplete(); return threadIdRef.get(); } String createThread(AssistantsClient client) { AssistantThread assistantThread = client.createThread(new AssistantThreadCreationOptions()); assertNotNull(assistantThread.getId()); assertNotNull(assistantThread.getCreatedAt()); assertEquals("thread", assistantThread.getObject()); return assistantThread.getId(); } void deleteThread(AssistantsAsyncClient client, String threadId) { if (CoreUtils.isNullOrEmpty(threadId)) { return; } StepVerifier.create(client.deleteThread(threadId)) .assertNext(deletionStatus -> { assertEquals(threadId, deletionStatus.getId()); assertTrue(deletionStatus.isDeleted()); }) .verifyComplete(); } void deleteThread(AssistantsClient client, String threadId) { if (CoreUtils.isNullOrEmpty(threadId)) { return; } ThreadDeletionStatus threadDeletionStatus = client.deleteThread(threadId); assertEquals(threadId, threadDeletionStatus.getId()); assertTrue(threadDeletionStatus.isDeleted()); } ThreadRun createThreadAndRun(AssistantsAsyncClient client,
CreateAndRunThreadOptions options) { AtomicReference<ThreadRun> threadRunRef = new AtomicReference<>(); StepVerifier.create(client.createThreadAndRun(options)) .assertNext(run -> { assertNotNull(run.getId()); assertNotNull(run.getCreatedAt()); assertEquals("thread.run", run.getObject()); assertNotNull(run.getInstructions()); threadRunRef.set(run); }) .verifyComplete(); return threadRunRef.get(); } ThreadRun createThreadAndRun(AssistantsClient client, CreateAndRunThreadOptions options) { ThreadRun run = client.createThreadAndRun(options); assertNotNull(run.getId()); assertNotNull(run.getCreatedAt()); assertEquals("thread.run", run.getObject()); assertNotNull(run.getInstructions()); return run; } void validateThreadRun(ThreadRun expect, ThreadRun actual) { assertEquals(expect.getId(), actual.getId()); assertEquals(expect.getThreadId(), actual.getThreadId()); assertEquals(expect.getAssistantId(), actual.getAssistantId()); assertEquals(expect.getCreatedAt(), actual.getCreatedAt()); assertEquals(expect.getCompletedAt(), actual.getCompletedAt()); assertEquals(expect.getInstructions(), actual.getInstructions()); assertEquals(expect.getObject(), actual.getObject()); assertEquals(expect.getModel(), actual.getModel()); } void validateRunStep(RunStep expect, RunStep actual) { assertEquals(expect.getId(), actual.getId()); assertEquals(expect.getRunId(), actual.getRunId()); assertEquals(expect.getThreadId(), actual.getThreadId()); assertEquals(expect.getAssistantId(), actual.getAssistantId()); assertEquals(expect.getObject(), actual.getObject()); assertEquals(expect.getType(), actual.getType()); } void validateThreadMessage(ThreadMessage threadMessage, String threadId) { String threadMessageId = threadMessage.getId(); assertNotNull(threadMessageId); assertEquals(threadId, threadMessage.getThreadId()); assertNotNull(threadMessage.getCreatedAt()); assertEquals("thread.message", threadMessage.getObject()); assertEquals(MessageRole.USER, threadMessage.getRole());
assertFalse(threadMessage.getContent().isEmpty()); } }
I think it would be good to add convenience constructors that accept only `file_ids` or only `vector_store_ids`, so that users don't need to pass `null` when they only want to use one of the two.
public void fileSearchWithMaxNumberResult(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { client = getAssistantsAsyncClient(httpClient, serviceVersion); fileSearchWithMaxNumberResultRunner((fileDetails, assistantCreationOptions) -> { StepVerifier.create(client.uploadFile(fileDetails, FilePurpose.ASSISTANTS) .flatMap(openAIFile -> { AsyncUtils cleanUp = new AsyncUtils(); CreateToolResourcesOptions createToolResourcesOptions = new CreateToolResourcesOptions(); createToolResourcesOptions.setFileSearch( new CreateFileSearchToolResourceOptions( new CreateFileSearchToolResourceVectorStoreOptionsList( Arrays.asList(new CreateFileSearchToolResourceVectorStoreOptions( Arrays.asList(openAIFile.getId()), null )) ) ) ); assistantCreationOptions.setToolResources(createToolResourcesOptions); cleanUp.setFile(openAIFile); return client.createAssistant(assistantCreationOptions) .zipWith(Mono.just(cleanUp)); }) ).verifyErrorSatisfies(error -> assertInstanceOf(HttpResponseException.class, error)); }); }
null
public void fileSearchWithMaxNumberResult(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { client = getAssistantsAsyncClient(httpClient, serviceVersion); fileSearchWithMaxNumberResultRunner((fileDetails, assistantCreationOptions) -> { StepVerifier.create(client.uploadFile(fileDetails, FilePurpose.ASSISTANTS) .flatMap(openAIFile -> { AsyncUtils cleanUp = new AsyncUtils(); CreateToolResourcesOptions createToolResourcesOptions = new CreateToolResourcesOptions(); createToolResourcesOptions.setFileSearch( new CreateFileSearchToolResourceOptions( new CreateFileSearchToolResourceVectorStoreOptionsList( Arrays.asList(new CreateFileSearchToolResourceVectorStoreOptions( Arrays.asList(openAIFile.getId())))))); assistantCreationOptions.setToolResources(createToolResourcesOptions); cleanUp.setFile(openAIFile); return client.createAssistant(assistantCreationOptions) .zipWith(Mono.just(cleanUp)); }) ).verifyErrorSatisfies(error -> assertInstanceOf(HttpResponseException.class, error)); }); }
/**
 * Async file-search test against the Azure endpoint — disabled because, per the annotation text,
 * file_search tools are not supported in Azure.
 *
 * <p>{@code basicFileSearch} uploads a file, creates an assistant whose file-search tool resource
 * references that file, creates a thread and a user message, starts a run and polls it (via
 * repeatWhen/takeUntil on 1s delays) until it leaves IN_PROGRESS/QUEUED, then asserts the run
 * COMPLETED, the assistant's reply contains "232323", and finally deletes the assistant, file,
 * and thread it created.
 *
 * <p>NOTE(review): this block was recovered from a collapsed dump. The {@code @MethodSource}
 * annotation strings are visibly truncated, and a dangling {@code @Disabled}/{@code @ParameterizedTest}/
 * {@code @MethodSource} group with no method body precedes the closing brace — both look like
 * extraction artifacts; confirm against the repository.
 */
class AzureFileSearchAsyncTest extends FileSearchTestBase { AssistantsAsyncClient client; @Disabled("file_search tools are not supported in Azure") @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void basicFileSearch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { client = getAssistantsAsyncClient(httpClient, serviceVersion); createRetrievalRunner((fileDetails, assistantCreationOptions) -> { StepVerifier.create(client.uploadFile(fileDetails, FilePurpose.ASSISTANTS) .flatMap(openAIFile -> { AsyncUtils cleanUp = new AsyncUtils(); CreateToolResourcesOptions createToolResourcesOptions = new CreateToolResourcesOptions(); createToolResourcesOptions.setFileSearch( new CreateFileSearchToolResourceOptions( new CreateFileSearchToolResourceVectorStoreOptionsList( Arrays.asList(new CreateFileSearchToolResourceVectorStoreOptions( Arrays.asList(openAIFile.getId()), null ))))); assistantCreationOptions.setToolResources(createToolResourcesOptions); cleanUp.setFile(openAIFile); return client.createAssistant(assistantCreationOptions).zipWith(Mono.just(cleanUp)); }).flatMap(tuple -> { Assistant assistant = tuple.getT1(); AsyncUtils cleanUp = tuple.getT2(); cleanUp.setAssistant(assistant); return client.createThread(new AssistantThreadCreationOptions()) .zipWith(Mono.just(cleanUp)); }).flatMap(tuple -> { AssistantThread thread = tuple.getT1(); AsyncUtils cleanUp = tuple.getT2(); cleanUp.setThread(thread); return client.createMessage( thread.getId(), new ThreadMessageOptions( MessageRole.USER, "Can you give me the documented codes for 'banana' and 'orange'?"
)).flatMap(_message -> client.createRun(cleanUp.getThread(), cleanUp.getAssistant()) .flatMap(createdRun -> client.getRun(cleanUp.getThread().getId(), createdRun.getId()).zipWith(Mono.just(cleanUp)) .repeatWhen(completed -> completed.delayElements(Duration.ofMillis(1000))) .takeUntil(tuple2 -> { ThreadRun run = tuple2.getT1(); return run.getStatus() != RunStatus.IN_PROGRESS && run.getStatus() != RunStatus.QUEUED; }) .last() ) ); }).flatMap(tuple -> { ThreadRun run = tuple.getT1(); AsyncUtils cleanUp = tuple.getT2(); assertEquals(RunStatus.COMPLETED, run.getStatus()); assertEquals(cleanUp.getAssistant().getId(), run.getAssistantId()); return client.listMessages(cleanUp.getThread().getId()).zipWith(Mono.just(cleanUp)); }).map(tuple -> { PageableList<ThreadMessage> messageList = tuple.getT1(); AsyncUtils cleanUp = tuple.getT2(); assertEquals(2, messageList.getData().size()); ThreadMessage firstMessage = messageList.getData().get(0); assertEquals(MessageRole.ASSISTANT, firstMessage.getRole()); assertFalse(firstMessage.getContent().isEmpty()); MessageTextContent firstMessageContent = (MessageTextContent) firstMessage.getContent().get(0); assertNotNull(firstMessageContent); assertTrue(firstMessageContent.getText().getValue().contains("232323")); return cleanUp; }) .flatMap(cleanUp -> client.deleteAssistant(cleanUp.getAssistant().getId()) .flatMap(_unused -> client.deleteFile(cleanUp.getFile().getId())) .flatMap(_unused -> client.deleteThread(cleanUp.getThread().getId())))) .assertNext(threadDeletionStatus -> assertTrue(threadDeletionStatus.isDeleted())) .verifyComplete(); }); } @Disabled("file_search tools are not supported in Azure") @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils }
/**
 * Async file-search test against the Azure endpoint (enabled variant; uses {@code fileSearchRunner}
 * and the single-argument {@code CreateFileSearchToolResourceVectorStoreOptions} constructor).
 *
 * <p>{@code basicFileSearch} uploads a file, creates an assistant whose file-search tool resource
 * references that file, creates a thread and a user message, starts a run and polls it (via
 * repeatWhen/takeUntil on 1s delays) until it leaves IN_PROGRESS/QUEUED, then asserts the run
 * COMPLETED, the assistant's reply contains "232323", and finally deletes the assistant, file,
 * and thread it created.
 *
 * <p>NOTE(review): this block was recovered from a collapsed dump. The {@code @MethodSource}
 * annotation strings are visibly truncated, and a dangling {@code @ParameterizedTest}/
 * {@code @MethodSource} pair with no method body precedes the closing brace — both look like
 * extraction artifacts; confirm against the repository.
 */
class AzureFileSearchAsyncTest extends FileSearchTestBase { AssistantsAsyncClient client; @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void basicFileSearch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { client = getAssistantsAsyncClient(httpClient, serviceVersion); fileSearchRunner((fileDetails, assistantCreationOptions) -> { StepVerifier.create(client.uploadFile(fileDetails, FilePurpose.ASSISTANTS) .flatMap(openAIFile -> { AsyncUtils cleanUp = new AsyncUtils(); CreateToolResourcesOptions createToolResourcesOptions = new CreateToolResourcesOptions(); createToolResourcesOptions.setFileSearch( new CreateFileSearchToolResourceOptions( new CreateFileSearchToolResourceVectorStoreOptionsList( Arrays.asList(new CreateFileSearchToolResourceVectorStoreOptions( Arrays.asList(openAIFile.getId())))))); assistantCreationOptions.setToolResources(createToolResourcesOptions); cleanUp.setFile(openAIFile); return client.createAssistant(assistantCreationOptions).zipWith(Mono.just(cleanUp)); }).flatMap(tuple -> { Assistant assistant = tuple.getT1(); AsyncUtils cleanUp = tuple.getT2(); cleanUp.setAssistant(assistant); return client.createThread(new AssistantThreadCreationOptions()) .zipWith(Mono.just(cleanUp)); }).flatMap(tuple -> { AssistantThread thread = tuple.getT1(); AsyncUtils cleanUp = tuple.getT2(); cleanUp.setThread(thread); return client.createMessage( thread.getId(), new ThreadMessageOptions( MessageRole.USER, "Can you give me the documented codes for 'banana' and 'orange'?"
)).flatMap(_message -> client.createRun(cleanUp.getThread(), cleanUp.getAssistant()) .flatMap(createdRun -> client.getRun(cleanUp.getThread().getId(), createdRun.getId()).zipWith(Mono.just(cleanUp)) .repeatWhen(completed -> completed.delayElements(Duration.ofMillis(1000))) .takeUntil(tuple2 -> { ThreadRun run = tuple2.getT1(); return run.getStatus() != RunStatus.IN_PROGRESS && run.getStatus() != RunStatus.QUEUED; }) .last() ) ); }).flatMap(tuple -> { ThreadRun run = tuple.getT1(); AsyncUtils cleanUp = tuple.getT2(); assertEquals(RunStatus.COMPLETED, run.getStatus()); assertEquals(cleanUp.getAssistant().getId(), run.getAssistantId()); return client.listMessages(cleanUp.getThread().getId()).zipWith(Mono.just(cleanUp)); }).map(tuple -> { PageableList<ThreadMessage> messageList = tuple.getT1(); AsyncUtils cleanUp = tuple.getT2(); assertEquals(2, messageList.getData().size()); ThreadMessage firstMessage = messageList.getData().get(0); assertEquals(MessageRole.ASSISTANT, firstMessage.getRole()); assertFalse(firstMessage.getContent().isEmpty()); MessageTextContent firstMessageContent = (MessageTextContent) firstMessage.getContent().get(0); assertNotNull(firstMessageContent); assertTrue(firstMessageContent.getText().getValue().contains("232323")); return cleanUp; }) .flatMap(cleanUp -> client.deleteAssistant(cleanUp.getAssistant().getId()) .flatMap(_unused -> client.deleteFile(cleanUp.getFile().getId())) .flatMap(_unused -> client.deleteThread(cleanUp.getThread().getId())))) .assertNext(threadDeletionStatus -> assertTrue(threadDeletionStatus.isDeleted())) .verifyComplete(); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils }
The addition of `beforeTest` is not part of this PR, but I think it's really nice. Have you tried moving it into `VectorStoreTestBase` and annotating it with `@BeforeEach` from JUnit 5? It might save a couple of lines here and there.
public void createVectorStoreFileWithAutoChunkingStrategy(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); VectorStoreFile vectorStoreFile = client.createVectorStoreFile(vectorStore.getId(), fileIds.get(0), new VectorStoreAutoChunkingStrategyRequest()); assertVectorStoreFile(vectorStoreFile); assertStaticChunkingStrategy(vectorStoreFile, 800, 400); }
beforeTest(httpClient, serviceVersion);
public void createVectorStoreFileWithAutoChunkingStrategy(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); VectorStoreFile vectorStoreFile = client.createVectorStoreFile(vectorStore.getId(), fileIds.get(0), new VectorStoreAutoChunkingStrategyRequest()); assertVectorStoreFile(vectorStoreFile); assertStaticChunkingStrategy(vectorStoreFile, 800, 400); }
class AzureVectorStoreSyncTests extends VectorStoreTestBase { private static final ClientLogger LOGGER = new ClientLogger(AzureVectorStoreSyncTests.class); private AssistantsClient client; private VectorStore vectorStore; private List<String> fileIds = new ArrayList<>(); protected void beforeTest(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { client = getAssistantsClient(httpClient, serviceVersion); addFile(ALPHABET_FINANCIAL_STATEMENT); VectorStoreOptions vectorStoreOptions = new VectorStoreOptions() .setName("Financial Statements") .setExpiresAfter(new VectorStoreExpirationPolicy(LAST_ACTIVE_AT, 1)); vectorStore = client.createVectorStore(vectorStoreOptions); assertNotNull(vectorStore); assertNotNull(vectorStore.getId()); } private void addFile(String fileId) { fileIds.add(uploadFile(client, fileId, ASSISTANTS)); } @Override protected void afterTest() { LOGGER.info("Cleaning up created resources."); deleteVectorStores(client, vectorStore.getId()); deleteFiles(client, fileIds.toArray(new String[0])); LOGGER.info("Finished cleaning up resources."); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void updateVectorStoreName(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); modifyVectorStoreRunner(vectorStoreDetails -> { String vectorStoreId = vectorStore.getId(); VectorStore vectorStore = client.modifyVectorStore(vectorStoreId, vectorStoreDetails); assertNotNull(vectorStore); assertEquals(vectorStoreId, vectorStore.getId()); assertEquals(vectorStoreDetails.getName(), vectorStore.getName()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void getVectorStore(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String vectorStoreId = vectorStore.getId(); VectorStore vectorStore = 
client.getVectorStore(vectorStoreId); assertNotNull(vectorStore); assertEquals(vectorStoreId, vectorStore.getId()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void listVectorStore(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); PageableList<VectorStore> vectorStores = client.listVectorStores(); assertNotNull(vectorStores); assertFalse(vectorStores.getData().isEmpty()); vectorStores.getData().forEach(vectorStore -> { assertNotNull(vectorStore.getId()); assertNotNull(vectorStore.getCreatedAt()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void createVectorStoreFile(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); VectorStoreFile vectorStoreFile = client.createVectorStoreFile(vectorStore.getId(), fileIds.get(0)); assertVectorStoreFile(vectorStoreFile); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void createVectorStoreFileWithStaticChunkingStrategy(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); int maxChunkSizeTokens = 101; int chunkOverlapTokens = 50; VectorStoreFile vectorStoreFile = client.createVectorStoreFile(vectorStore.getId(), fileIds.get(0), new VectorStoreStaticChunkingStrategyRequest( new VectorStoreStaticChunkingStrategyOptions(maxChunkSizeTokens, chunkOverlapTokens)) ); assertVectorStoreFile(vectorStoreFile); assertStaticChunkingStrategy(vectorStoreFile, maxChunkSizeTokens, chunkOverlapTokens); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void 
throwExceptionWhenOverrideExistChunkStrategy(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); VectorStoreFile vectorStoreFile = client.createVectorStoreFile(vectorStore.getId(), fileIds.get(0), new VectorStoreAutoChunkingStrategyRequest()); assertVectorStoreFile(vectorStoreFile); assertThrows(HttpResponseException.class, () -> client.createVectorStoreFile(vectorStore.getId(), fileIds.get(0), new VectorStoreStaticChunkingStrategyRequest( new VectorStoreStaticChunkingStrategyOptions(101, 50)) )); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void getVectorStoreFile(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String storeId = vectorStore.getId(); String fileId = fileIds.get(0); VectorStoreFile vectorStoreFile = client.createVectorStoreFile(storeId, fileId); VectorStoreFile vectorStoreFileResponse = client.getVectorStoreFile(storeId, fileId); while (VectorStoreFileStatus.IN_PROGRESS == vectorStoreFileResponse.getStatus()) { vectorStoreFileResponse = client.getVectorStoreFile(storeId, fileId); } assertNotNull(vectorStoreFileResponse); assertEquals(vectorStoreFile.getVectorStoreId(), vectorStoreFileResponse.getVectorStoreId()); assertEquals(vectorStoreFile.getId(), vectorStoreFileResponse.getId()); assertEquals(fileId, vectorStoreFileResponse.getId()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void listVectorStoreFiles(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String storeId = vectorStore.getId(); String fileId = fileIds.get(0); String fileId2 = uploadFile(client, "20220924_aapl_10k.pdf", ASSISTANTS); fileIds.add(fileId2); VectorStoreFile vectorStoreFile = client.createVectorStoreFile(storeId, fileId); VectorStoreFile vectorStoreFile2 = 
client.createVectorStoreFile(storeId, fileId2); assertEquals(fileId, vectorStoreFile.getId()); assertEquals(fileId2, vectorStoreFile2.getId()); PageableList<VectorStoreFile> vectorStoreFiles = client.listVectorStoreFiles(storeId); assertNotNull(vectorStoreFiles); assertFalse(vectorStoreFiles.getData().isEmpty()); vectorStoreFiles.getData().forEach(storeFile -> { assertNotNull(storeFile.getId()); assertNotNull(storeFile.getCreatedAt()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void deleteVectorStoreFile(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String storeId = vectorStore.getId(); String fileId = fileIds.get(0); VectorStoreFile vectorStoreFile = client.createVectorStoreFile(storeId, fileId); assertVectorStoreFile(vectorStoreFile); VectorStoreFileDeletionStatus deletionStatus = client.deleteVectorStoreFile(storeId, fileId); assertTrue(deletionStatus.isDeleted()); assertEquals(fileId, deletionStatus.getId()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void createVectorStoreFileBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); addFile(APPLE_FINANCIAL_STATEMENT); VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(vectorStore.getId(), Arrays.asList(fileIds.get(0), fileIds.get(1))); assertVectorStoreFileBatch(vectorStoreFileBatch, 2); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void createVectorStoreFileBatchWithAutoChunkingStrategy(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); addFile(APPLE_FINANCIAL_STATEMENT); VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(vectorStore.getId(), 
Arrays.asList(fileIds.get(0), fileIds.get(1)), new VectorStoreAutoChunkingStrategyRequest()); assertVectorStoreFileBatch(vectorStoreFileBatch, 2); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void createVectorStoreFileBatchWithStaticChunkingStrategy(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); addFile(APPLE_FINANCIAL_STATEMENT); VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(vectorStore.getId(), Arrays.asList(fileIds.get(0), fileIds.get(1)), new VectorStoreStaticChunkingStrategyRequest( new VectorStoreStaticChunkingStrategyOptions(101, 50))); assertVectorStoreFileBatch(vectorStoreFileBatch, 2); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void throwExceptionWhenOverrideExistChunkStrategyInBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); addFile(APPLE_FINANCIAL_STATEMENT); VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(vectorStore.getId(), Arrays.asList(fileIds.get(0), fileIds.get(1)), new VectorStoreStaticChunkingStrategyRequest( new VectorStoreStaticChunkingStrategyOptions(101, 50))); assertNotNull(vectorStoreFileBatch); assertThrows(HttpResponseException.class, () -> client.createVectorStoreFileBatch(vectorStore.getId(), Arrays.asList(fileIds.get(0), fileIds.get(1)), new VectorStoreAutoChunkingStrategyRequest() )); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void getVectorStoreFileBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String storeId = vectorStore.getId(); String fileId = fileIds.get(0); String fileId2 = uploadFile(client, "20220924_aapl_10k.pdf", ASSISTANTS); fileIds.add(fileId2); 
VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(storeId, Arrays.asList(fileId, fileId2)); String batchId = vectorStoreFileBatch.getId(); int totalFileCounts = vectorStoreFileBatch.getFileCounts().getTotal(); VectorStoreFileBatch vectorStoreFileBatchResponse = client.getVectorStoreFileBatch(storeId, batchId); assertEquals(storeId, vectorStoreFileBatchResponse.getVectorStoreId()); assertEquals(batchId, vectorStoreFileBatchResponse.getId()); assertEquals(totalFileCounts, vectorStoreFileBatchResponse.getFileCounts().getTotal()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils @Disabled("This test is failing with 500. The server had an error processing your request. Sorry about that! " + "You can retry your request, or contact us through our help center at oai-assistants@microsoft.com if " + "you keep seeing this error.") public void listVectorStoreFilesBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String storeId = vectorStore.getId(); String fileId = fileIds.get(0); String fileId2 = uploadFile(client, "20220924_aapl_10k.pdf", ASSISTANTS); fileIds.add(fileId2); VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(storeId, Arrays.asList(fileId, fileId2)); PageableList<VectorStoreFile> vectorStoreFiles = client.listVectorStoreFileBatchFiles(storeId, vectorStoreFileBatch.getId()); assertNotNull(vectorStoreFiles); assertFalse(vectorStoreFiles.getData().isEmpty()); vectorStoreFiles.getData().forEach(vectorStoreFile -> { assertNotNull(vectorStoreFile.getId()); assertNotNull(vectorStoreFile.getCreatedAt()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void cancelVectorStoreFileBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String storeId = 
vectorStore.getId(); String fileId = fileIds.get(0); String fileId2 = uploadFile(client, "20220924_aapl_10k.pdf", ASSISTANTS); fileIds.add(fileId2); VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(storeId, Arrays.asList(fileId, fileId2)); VectorStoreFileBatch cancelVectorStoreFileBatch = client.cancelVectorStoreFileBatch(storeId, vectorStoreFileBatch.getId()); while (VectorStoreFileBatchStatus.IN_PROGRESS == cancelVectorStoreFileBatch.getStatus()) { cancelVectorStoreFileBatch = client.getVectorStoreFileBatch(storeId, vectorStoreFileBatch.getId()); } assertNotNull(vectorStoreFileBatch); assertNotNull(cancelVectorStoreFileBatch); assertEquals(vectorStoreFileBatch.getId(), cancelVectorStoreFileBatch.getId()); assertEquals(vectorStoreFileBatch.getFileCounts().getTotal(), cancelVectorStoreFileBatch.getFileCounts().getTotal()); } }
class AzureVectorStoreSyncTests extends VectorStoreTestBase { private static final ClientLogger LOGGER = new ClientLogger(AzureVectorStoreSyncTests.class); private AssistantsClient client; private VectorStore vectorStore; private List<String> fileIds = new ArrayList<>(); protected void beforeTest(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { client = getAssistantsClient(httpClient, serviceVersion); addFile(ALPHABET_FINANCIAL_STATEMENT); VectorStoreOptions vectorStoreOptions = new VectorStoreOptions() .setName("Financial Statements") .setExpiresAfter(new VectorStoreExpirationPolicy(LAST_ACTIVE_AT, 1)); vectorStore = client.createVectorStore(vectorStoreOptions); assertNotNull(vectorStore); assertNotNull(vectorStore.getId()); } private void addFile(String fileId) { fileIds.add(uploadFile(client, fileId, ASSISTANTS)); } @Override protected void afterTest() { LOGGER.info("Cleaning up created resources."); deleteVectorStores(client, vectorStore.getId()); deleteFiles(client, fileIds.toArray(new String[0])); LOGGER.info("Finished cleaning up resources."); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void updateVectorStoreName(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); modifyVectorStoreRunner(vectorStoreDetails -> { String vectorStoreId = vectorStore.getId(); VectorStore vectorStore = client.modifyVectorStore(vectorStoreId, vectorStoreDetails); assertNotNull(vectorStore); assertEquals(vectorStoreId, vectorStore.getId()); assertEquals(vectorStoreDetails.getName(), vectorStore.getName()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void getVectorStore(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String vectorStoreId = vectorStore.getId(); VectorStore vectorStore = 
client.getVectorStore(vectorStoreId); assertNotNull(vectorStore); assertEquals(vectorStoreId, vectorStore.getId()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void listVectorStore(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); PageableList<VectorStore> vectorStores = client.listVectorStores(); assertNotNull(vectorStores); assertFalse(vectorStores.getData().isEmpty()); vectorStores.getData().forEach(vectorStore -> { assertNotNull(vectorStore.getId()); assertNotNull(vectorStore.getCreatedAt()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void createVectorStoreFile(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); VectorStoreFile vectorStoreFile = client.createVectorStoreFile(vectorStore.getId(), fileIds.get(0)); assertVectorStoreFile(vectorStoreFile); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void createVectorStoreFileWithStaticChunkingStrategy(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); int maxChunkSizeTokens = 101; int chunkOverlapTokens = 50; VectorStoreFile vectorStoreFile = client.createVectorStoreFile(vectorStore.getId(), fileIds.get(0), new VectorStoreStaticChunkingStrategyRequest( new VectorStoreStaticChunkingStrategyOptions(maxChunkSizeTokens, chunkOverlapTokens)) ); assertVectorStoreFile(vectorStoreFile); assertStaticChunkingStrategy(vectorStoreFile, maxChunkSizeTokens, chunkOverlapTokens); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void 
throwExceptionWhenOverrideExistChunkStrategy(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); VectorStoreFile vectorStoreFile = client.createVectorStoreFile(vectorStore.getId(), fileIds.get(0), new VectorStoreAutoChunkingStrategyRequest()); assertVectorStoreFile(vectorStoreFile); assertThrows(HttpResponseException.class, () -> client.createVectorStoreFile(vectorStore.getId(), fileIds.get(0), new VectorStoreStaticChunkingStrategyRequest( new VectorStoreStaticChunkingStrategyOptions(101, 50)) )); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void getVectorStoreFile(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String storeId = vectorStore.getId(); String fileId = fileIds.get(0); VectorStoreFile vectorStoreFile = client.createVectorStoreFile(storeId, fileId); VectorStoreFile vectorStoreFileResponse = client.getVectorStoreFile(storeId, fileId); while (VectorStoreFileStatus.IN_PROGRESS == vectorStoreFileResponse.getStatus()) { vectorStoreFileResponse = client.getVectorStoreFile(storeId, fileId); } assertNotNull(vectorStoreFileResponse); assertEquals(vectorStoreFile.getVectorStoreId(), vectorStoreFileResponse.getVectorStoreId()); assertEquals(vectorStoreFile.getId(), vectorStoreFileResponse.getId()); assertEquals(fileId, vectorStoreFileResponse.getId()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void listVectorStoreFiles(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String storeId = vectorStore.getId(); String fileId = fileIds.get(0); String fileId2 = uploadFile(client, "20220924_aapl_10k.pdf", ASSISTANTS); fileIds.add(fileId2); VectorStoreFile vectorStoreFile = client.createVectorStoreFile(storeId, fileId); VectorStoreFile vectorStoreFile2 = 
client.createVectorStoreFile(storeId, fileId2); assertEquals(fileId, vectorStoreFile.getId()); assertEquals(fileId2, vectorStoreFile2.getId()); PageableList<VectorStoreFile> vectorStoreFiles = client.listVectorStoreFiles(storeId); assertNotNull(vectorStoreFiles); assertFalse(vectorStoreFiles.getData().isEmpty()); vectorStoreFiles.getData().forEach(storeFile -> { assertNotNull(storeFile.getId()); assertNotNull(storeFile.getCreatedAt()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void deleteVectorStoreFile(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String storeId = vectorStore.getId(); String fileId = fileIds.get(0); VectorStoreFile vectorStoreFile = client.createVectorStoreFile(storeId, fileId); assertVectorStoreFile(vectorStoreFile); VectorStoreFileDeletionStatus deletionStatus = client.deleteVectorStoreFile(storeId, fileId); assertTrue(deletionStatus.isDeleted()); assertEquals(fileId, deletionStatus.getId()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void createVectorStoreFileBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); addFile(APPLE_FINANCIAL_STATEMENT); VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(vectorStore.getId(), Arrays.asList(fileIds.get(0), fileIds.get(1))); assertVectorStoreFileBatch(vectorStoreFileBatch, 2); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void createVectorStoreFileBatchWithAutoChunkingStrategy(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); addFile(APPLE_FINANCIAL_STATEMENT); VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(vectorStore.getId(), 
Arrays.asList(fileIds.get(0), fileIds.get(1)), new VectorStoreAutoChunkingStrategyRequest()); assertVectorStoreFileBatch(vectorStoreFileBatch, 2); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void createVectorStoreFileBatchWithStaticChunkingStrategy(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); addFile(APPLE_FINANCIAL_STATEMENT); VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(vectorStore.getId(), Arrays.asList(fileIds.get(0), fileIds.get(1)), new VectorStoreStaticChunkingStrategyRequest( new VectorStoreStaticChunkingStrategyOptions(101, 50))); assertVectorStoreFileBatch(vectorStoreFileBatch, 2); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void throwExceptionWhenOverrideExistChunkStrategyInBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); addFile(APPLE_FINANCIAL_STATEMENT); VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(vectorStore.getId(), Arrays.asList(fileIds.get(0), fileIds.get(1)), new VectorStoreStaticChunkingStrategyRequest( new VectorStoreStaticChunkingStrategyOptions(101, 50))); assertNotNull(vectorStoreFileBatch); assertThrows(HttpResponseException.class, () -> client.createVectorStoreFileBatch(vectorStore.getId(), Arrays.asList(fileIds.get(0), fileIds.get(1)), new VectorStoreAutoChunkingStrategyRequest() )); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void getVectorStoreFileBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String storeId = vectorStore.getId(); String fileId = fileIds.get(0); String fileId2 = uploadFile(client, "20220924_aapl_10k.pdf", ASSISTANTS); fileIds.add(fileId2); 
VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(storeId, Arrays.asList(fileId, fileId2)); String batchId = vectorStoreFileBatch.getId(); int totalFileCounts = vectorStoreFileBatch.getFileCounts().getTotal(); VectorStoreFileBatch vectorStoreFileBatchResponse = client.getVectorStoreFileBatch(storeId, batchId); assertEquals(storeId, vectorStoreFileBatchResponse.getVectorStoreId()); assertEquals(batchId, vectorStoreFileBatchResponse.getId()); assertEquals(totalFileCounts, vectorStoreFileBatchResponse.getFileCounts().getTotal()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils @Disabled("This test is failing with 500. The server had an error processing your request. Sorry about that! " + "You can retry your request, or contact us through our help center at oai-assistants@microsoft.com if " + "you keep seeing this error.") public void listVectorStoreFilesBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String storeId = vectorStore.getId(); String fileId = fileIds.get(0); String fileId2 = uploadFile(client, "20220924_aapl_10k.pdf", ASSISTANTS); fileIds.add(fileId2); VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(storeId, Arrays.asList(fileId, fileId2)); PageableList<VectorStoreFile> vectorStoreFiles = client.listVectorStoreFileBatchFiles(storeId, vectorStoreFileBatch.getId()); assertNotNull(vectorStoreFiles); assertFalse(vectorStoreFiles.getData().isEmpty()); vectorStoreFiles.getData().forEach(vectorStoreFile -> { assertNotNull(vectorStoreFile.getId()); assertNotNull(vectorStoreFile.getCreatedAt()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void cancelVectorStoreFileBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String storeId = 
vectorStore.getId(); String fileId = fileIds.get(0); String fileId2 = uploadFile(client, "20220924_aapl_10k.pdf", ASSISTANTS); fileIds.add(fileId2); VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(storeId, Arrays.asList(fileId, fileId2)); VectorStoreFileBatch cancelVectorStoreFileBatch = client.cancelVectorStoreFileBatch(storeId, vectorStoreFileBatch.getId()); while (VectorStoreFileBatchStatus.IN_PROGRESS == cancelVectorStoreFileBatch.getStatus()) { cancelVectorStoreFileBatch = client.getVectorStoreFileBatch(storeId, vectorStoreFileBatch.getId()); } assertNotNull(vectorStoreFileBatch); assertNotNull(cancelVectorStoreFileBatch); assertEquals(vectorStoreFileBatch.getId(), cancelVectorStoreFileBatch.getId()); assertEquals(vectorStoreFileBatch.getFileCounts().getTotal(), cancelVectorStoreFileBatch.getFileCounts().getTotal()); } }
- BeforeEach doesn't take any method signature. (Method 'beforeTest' annotated with '@BeforeEach' should not declare parameter 'httpClient' ) - The client could be either of SyncClient or AsyncClient
public void createVectorStoreFileWithAutoChunkingStrategy(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); VectorStoreFile vectorStoreFile = client.createVectorStoreFile(vectorStore.getId(), fileIds.get(0), new VectorStoreAutoChunkingStrategyRequest()); assertVectorStoreFile(vectorStoreFile); assertStaticChunkingStrategy(vectorStoreFile, 800, 400); }
beforeTest(httpClient, serviceVersion);
public void createVectorStoreFileWithAutoChunkingStrategy(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); VectorStoreFile vectorStoreFile = client.createVectorStoreFile(vectorStore.getId(), fileIds.get(0), new VectorStoreAutoChunkingStrategyRequest()); assertVectorStoreFile(vectorStoreFile); assertStaticChunkingStrategy(vectorStoreFile, 800, 400); }
class AzureVectorStoreSyncTests extends VectorStoreTestBase { private static final ClientLogger LOGGER = new ClientLogger(AzureVectorStoreSyncTests.class); private AssistantsClient client; private VectorStore vectorStore; private List<String> fileIds = new ArrayList<>(); protected void beforeTest(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { client = getAssistantsClient(httpClient, serviceVersion); addFile(ALPHABET_FINANCIAL_STATEMENT); VectorStoreOptions vectorStoreOptions = new VectorStoreOptions() .setName("Financial Statements") .setExpiresAfter(new VectorStoreExpirationPolicy(LAST_ACTIVE_AT, 1)); vectorStore = client.createVectorStore(vectorStoreOptions); assertNotNull(vectorStore); assertNotNull(vectorStore.getId()); } private void addFile(String fileId) { fileIds.add(uploadFile(client, fileId, ASSISTANTS)); } @Override protected void afterTest() { LOGGER.info("Cleaning up created resources."); deleteVectorStores(client, vectorStore.getId()); deleteFiles(client, fileIds.toArray(new String[0])); LOGGER.info("Finished cleaning up resources."); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void updateVectorStoreName(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); modifyVectorStoreRunner(vectorStoreDetails -> { String vectorStoreId = vectorStore.getId(); VectorStore vectorStore = client.modifyVectorStore(vectorStoreId, vectorStoreDetails); assertNotNull(vectorStore); assertEquals(vectorStoreId, vectorStore.getId()); assertEquals(vectorStoreDetails.getName(), vectorStore.getName()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void getVectorStore(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String vectorStoreId = vectorStore.getId(); VectorStore vectorStore = 
client.getVectorStore(vectorStoreId); assertNotNull(vectorStore); assertEquals(vectorStoreId, vectorStore.getId()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void listVectorStore(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); PageableList<VectorStore> vectorStores = client.listVectorStores(); assertNotNull(vectorStores); assertFalse(vectorStores.getData().isEmpty()); vectorStores.getData().forEach(vectorStore -> { assertNotNull(vectorStore.getId()); assertNotNull(vectorStore.getCreatedAt()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void createVectorStoreFile(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); VectorStoreFile vectorStoreFile = client.createVectorStoreFile(vectorStore.getId(), fileIds.get(0)); assertVectorStoreFile(vectorStoreFile); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void createVectorStoreFileWithStaticChunkingStrategy(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); int maxChunkSizeTokens = 101; int chunkOverlapTokens = 50; VectorStoreFile vectorStoreFile = client.createVectorStoreFile(vectorStore.getId(), fileIds.get(0), new VectorStoreStaticChunkingStrategyRequest( new VectorStoreStaticChunkingStrategyOptions(maxChunkSizeTokens, chunkOverlapTokens)) ); assertVectorStoreFile(vectorStoreFile); assertStaticChunkingStrategy(vectorStoreFile, maxChunkSizeTokens, chunkOverlapTokens); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void 
throwExceptionWhenOverrideExistChunkStrategy(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); VectorStoreFile vectorStoreFile = client.createVectorStoreFile(vectorStore.getId(), fileIds.get(0), new VectorStoreAutoChunkingStrategyRequest()); assertVectorStoreFile(vectorStoreFile); assertThrows(HttpResponseException.class, () -> client.createVectorStoreFile(vectorStore.getId(), fileIds.get(0), new VectorStoreStaticChunkingStrategyRequest( new VectorStoreStaticChunkingStrategyOptions(101, 50)) )); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void getVectorStoreFile(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String storeId = vectorStore.getId(); String fileId = fileIds.get(0); VectorStoreFile vectorStoreFile = client.createVectorStoreFile(storeId, fileId); VectorStoreFile vectorStoreFileResponse = client.getVectorStoreFile(storeId, fileId); while (VectorStoreFileStatus.IN_PROGRESS == vectorStoreFileResponse.getStatus()) { vectorStoreFileResponse = client.getVectorStoreFile(storeId, fileId); } assertNotNull(vectorStoreFileResponse); assertEquals(vectorStoreFile.getVectorStoreId(), vectorStoreFileResponse.getVectorStoreId()); assertEquals(vectorStoreFile.getId(), vectorStoreFileResponse.getId()); assertEquals(fileId, vectorStoreFileResponse.getId()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void listVectorStoreFiles(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String storeId = vectorStore.getId(); String fileId = fileIds.get(0); String fileId2 = uploadFile(client, "20220924_aapl_10k.pdf", ASSISTANTS); fileIds.add(fileId2); VectorStoreFile vectorStoreFile = client.createVectorStoreFile(storeId, fileId); VectorStoreFile vectorStoreFile2 = 
client.createVectorStoreFile(storeId, fileId2); assertEquals(fileId, vectorStoreFile.getId()); assertEquals(fileId2, vectorStoreFile2.getId()); PageableList<VectorStoreFile> vectorStoreFiles = client.listVectorStoreFiles(storeId); assertNotNull(vectorStoreFiles); assertFalse(vectorStoreFiles.getData().isEmpty()); vectorStoreFiles.getData().forEach(storeFile -> { assertNotNull(storeFile.getId()); assertNotNull(storeFile.getCreatedAt()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void deleteVectorStoreFile(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String storeId = vectorStore.getId(); String fileId = fileIds.get(0); VectorStoreFile vectorStoreFile = client.createVectorStoreFile(storeId, fileId); assertVectorStoreFile(vectorStoreFile); VectorStoreFileDeletionStatus deletionStatus = client.deleteVectorStoreFile(storeId, fileId); assertTrue(deletionStatus.isDeleted()); assertEquals(fileId, deletionStatus.getId()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void createVectorStoreFileBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); addFile(APPLE_FINANCIAL_STATEMENT); VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(vectorStore.getId(), Arrays.asList(fileIds.get(0), fileIds.get(1))); assertVectorStoreFileBatch(vectorStoreFileBatch, 2); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void createVectorStoreFileBatchWithAutoChunkingStrategy(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); addFile(APPLE_FINANCIAL_STATEMENT); VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(vectorStore.getId(), 
Arrays.asList(fileIds.get(0), fileIds.get(1)), new VectorStoreAutoChunkingStrategyRequest()); assertVectorStoreFileBatch(vectorStoreFileBatch, 2); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void createVectorStoreFileBatchWithStaticChunkingStrategy(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); addFile(APPLE_FINANCIAL_STATEMENT); VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(vectorStore.getId(), Arrays.asList(fileIds.get(0), fileIds.get(1)), new VectorStoreStaticChunkingStrategyRequest( new VectorStoreStaticChunkingStrategyOptions(101, 50))); assertVectorStoreFileBatch(vectorStoreFileBatch, 2); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void throwExceptionWhenOverrideExistChunkStrategyInBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); addFile(APPLE_FINANCIAL_STATEMENT); VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(vectorStore.getId(), Arrays.asList(fileIds.get(0), fileIds.get(1)), new VectorStoreStaticChunkingStrategyRequest( new VectorStoreStaticChunkingStrategyOptions(101, 50))); assertNotNull(vectorStoreFileBatch); assertThrows(HttpResponseException.class, () -> client.createVectorStoreFileBatch(vectorStore.getId(), Arrays.asList(fileIds.get(0), fileIds.get(1)), new VectorStoreAutoChunkingStrategyRequest() )); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void getVectorStoreFileBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String storeId = vectorStore.getId(); String fileId = fileIds.get(0); String fileId2 = uploadFile(client, "20220924_aapl_10k.pdf", ASSISTANTS); fileIds.add(fileId2); 
VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(storeId, Arrays.asList(fileId, fileId2)); String batchId = vectorStoreFileBatch.getId(); int totalFileCounts = vectorStoreFileBatch.getFileCounts().getTotal(); VectorStoreFileBatch vectorStoreFileBatchResponse = client.getVectorStoreFileBatch(storeId, batchId); assertEquals(storeId, vectorStoreFileBatchResponse.getVectorStoreId()); assertEquals(batchId, vectorStoreFileBatchResponse.getId()); assertEquals(totalFileCounts, vectorStoreFileBatchResponse.getFileCounts().getTotal()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils @Disabled("This test is failing with 500. The server had an error processing your request. Sorry about that! " + "You can retry your request, or contact us through our help center at oai-assistants@microsoft.com if " + "you keep seeing this error.") public void listVectorStoreFilesBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String storeId = vectorStore.getId(); String fileId = fileIds.get(0); String fileId2 = uploadFile(client, "20220924_aapl_10k.pdf", ASSISTANTS); fileIds.add(fileId2); VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(storeId, Arrays.asList(fileId, fileId2)); PageableList<VectorStoreFile> vectorStoreFiles = client.listVectorStoreFileBatchFiles(storeId, vectorStoreFileBatch.getId()); assertNotNull(vectorStoreFiles); assertFalse(vectorStoreFiles.getData().isEmpty()); vectorStoreFiles.getData().forEach(vectorStoreFile -> { assertNotNull(vectorStoreFile.getId()); assertNotNull(vectorStoreFile.getCreatedAt()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void cancelVectorStoreFileBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String storeId = 
vectorStore.getId(); String fileId = fileIds.get(0); String fileId2 = uploadFile(client, "20220924_aapl_10k.pdf", ASSISTANTS); fileIds.add(fileId2); VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(storeId, Arrays.asList(fileId, fileId2)); VectorStoreFileBatch cancelVectorStoreFileBatch = client.cancelVectorStoreFileBatch(storeId, vectorStoreFileBatch.getId()); while (VectorStoreFileBatchStatus.IN_PROGRESS == cancelVectorStoreFileBatch.getStatus()) { cancelVectorStoreFileBatch = client.getVectorStoreFileBatch(storeId, vectorStoreFileBatch.getId()); } assertNotNull(vectorStoreFileBatch); assertNotNull(cancelVectorStoreFileBatch); assertEquals(vectorStoreFileBatch.getId(), cancelVectorStoreFileBatch.getId()); assertEquals(vectorStoreFileBatch.getFileCounts().getTotal(), cancelVectorStoreFileBatch.getFileCounts().getTotal()); } }
class AzureVectorStoreSyncTests extends VectorStoreTestBase { private static final ClientLogger LOGGER = new ClientLogger(AzureVectorStoreSyncTests.class); private AssistantsClient client; private VectorStore vectorStore; private List<String> fileIds = new ArrayList<>(); protected void beforeTest(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { client = getAssistantsClient(httpClient, serviceVersion); addFile(ALPHABET_FINANCIAL_STATEMENT); VectorStoreOptions vectorStoreOptions = new VectorStoreOptions() .setName("Financial Statements") .setExpiresAfter(new VectorStoreExpirationPolicy(LAST_ACTIVE_AT, 1)); vectorStore = client.createVectorStore(vectorStoreOptions); assertNotNull(vectorStore); assertNotNull(vectorStore.getId()); } private void addFile(String fileId) { fileIds.add(uploadFile(client, fileId, ASSISTANTS)); } @Override protected void afterTest() { LOGGER.info("Cleaning up created resources."); deleteVectorStores(client, vectorStore.getId()); deleteFiles(client, fileIds.toArray(new String[0])); LOGGER.info("Finished cleaning up resources."); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void updateVectorStoreName(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); modifyVectorStoreRunner(vectorStoreDetails -> { String vectorStoreId = vectorStore.getId(); VectorStore vectorStore = client.modifyVectorStore(vectorStoreId, vectorStoreDetails); assertNotNull(vectorStore); assertEquals(vectorStoreId, vectorStore.getId()); assertEquals(vectorStoreDetails.getName(), vectorStore.getName()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void getVectorStore(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String vectorStoreId = vectorStore.getId(); VectorStore vectorStore = 
client.getVectorStore(vectorStoreId); assertNotNull(vectorStore); assertEquals(vectorStoreId, vectorStore.getId()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void listVectorStore(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); PageableList<VectorStore> vectorStores = client.listVectorStores(); assertNotNull(vectorStores); assertFalse(vectorStores.getData().isEmpty()); vectorStores.getData().forEach(vectorStore -> { assertNotNull(vectorStore.getId()); assertNotNull(vectorStore.getCreatedAt()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void createVectorStoreFile(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); VectorStoreFile vectorStoreFile = client.createVectorStoreFile(vectorStore.getId(), fileIds.get(0)); assertVectorStoreFile(vectorStoreFile); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void createVectorStoreFileWithStaticChunkingStrategy(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); int maxChunkSizeTokens = 101; int chunkOverlapTokens = 50; VectorStoreFile vectorStoreFile = client.createVectorStoreFile(vectorStore.getId(), fileIds.get(0), new VectorStoreStaticChunkingStrategyRequest( new VectorStoreStaticChunkingStrategyOptions(maxChunkSizeTokens, chunkOverlapTokens)) ); assertVectorStoreFile(vectorStoreFile); assertStaticChunkingStrategy(vectorStoreFile, maxChunkSizeTokens, chunkOverlapTokens); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void 
throwExceptionWhenOverrideExistChunkStrategy(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); VectorStoreFile vectorStoreFile = client.createVectorStoreFile(vectorStore.getId(), fileIds.get(0), new VectorStoreAutoChunkingStrategyRequest()); assertVectorStoreFile(vectorStoreFile); assertThrows(HttpResponseException.class, () -> client.createVectorStoreFile(vectorStore.getId(), fileIds.get(0), new VectorStoreStaticChunkingStrategyRequest( new VectorStoreStaticChunkingStrategyOptions(101, 50)) )); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void getVectorStoreFile(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String storeId = vectorStore.getId(); String fileId = fileIds.get(0); VectorStoreFile vectorStoreFile = client.createVectorStoreFile(storeId, fileId); VectorStoreFile vectorStoreFileResponse = client.getVectorStoreFile(storeId, fileId); while (VectorStoreFileStatus.IN_PROGRESS == vectorStoreFileResponse.getStatus()) { vectorStoreFileResponse = client.getVectorStoreFile(storeId, fileId); } assertNotNull(vectorStoreFileResponse); assertEquals(vectorStoreFile.getVectorStoreId(), vectorStoreFileResponse.getVectorStoreId()); assertEquals(vectorStoreFile.getId(), vectorStoreFileResponse.getId()); assertEquals(fileId, vectorStoreFileResponse.getId()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void listVectorStoreFiles(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String storeId = vectorStore.getId(); String fileId = fileIds.get(0); String fileId2 = uploadFile(client, "20220924_aapl_10k.pdf", ASSISTANTS); fileIds.add(fileId2); VectorStoreFile vectorStoreFile = client.createVectorStoreFile(storeId, fileId); VectorStoreFile vectorStoreFile2 = 
client.createVectorStoreFile(storeId, fileId2); assertEquals(fileId, vectorStoreFile.getId()); assertEquals(fileId2, vectorStoreFile2.getId()); PageableList<VectorStoreFile> vectorStoreFiles = client.listVectorStoreFiles(storeId); assertNotNull(vectorStoreFiles); assertFalse(vectorStoreFiles.getData().isEmpty()); vectorStoreFiles.getData().forEach(storeFile -> { assertNotNull(storeFile.getId()); assertNotNull(storeFile.getCreatedAt()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void deleteVectorStoreFile(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String storeId = vectorStore.getId(); String fileId = fileIds.get(0); VectorStoreFile vectorStoreFile = client.createVectorStoreFile(storeId, fileId); assertVectorStoreFile(vectorStoreFile); VectorStoreFileDeletionStatus deletionStatus = client.deleteVectorStoreFile(storeId, fileId); assertTrue(deletionStatus.isDeleted()); assertEquals(fileId, deletionStatus.getId()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void createVectorStoreFileBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); addFile(APPLE_FINANCIAL_STATEMENT); VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(vectorStore.getId(), Arrays.asList(fileIds.get(0), fileIds.get(1))); assertVectorStoreFileBatch(vectorStoreFileBatch, 2); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void createVectorStoreFileBatchWithAutoChunkingStrategy(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); addFile(APPLE_FINANCIAL_STATEMENT); VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(vectorStore.getId(), 
Arrays.asList(fileIds.get(0), fileIds.get(1)), new VectorStoreAutoChunkingStrategyRequest()); assertVectorStoreFileBatch(vectorStoreFileBatch, 2); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void createVectorStoreFileBatchWithStaticChunkingStrategy(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); addFile(APPLE_FINANCIAL_STATEMENT); VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(vectorStore.getId(), Arrays.asList(fileIds.get(0), fileIds.get(1)), new VectorStoreStaticChunkingStrategyRequest( new VectorStoreStaticChunkingStrategyOptions(101, 50))); assertVectorStoreFileBatch(vectorStoreFileBatch, 2); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void throwExceptionWhenOverrideExistChunkStrategyInBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); addFile(APPLE_FINANCIAL_STATEMENT); VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(vectorStore.getId(), Arrays.asList(fileIds.get(0), fileIds.get(1)), new VectorStoreStaticChunkingStrategyRequest( new VectorStoreStaticChunkingStrategyOptions(101, 50))); assertNotNull(vectorStoreFileBatch); assertThrows(HttpResponseException.class, () -> client.createVectorStoreFileBatch(vectorStore.getId(), Arrays.asList(fileIds.get(0), fileIds.get(1)), new VectorStoreAutoChunkingStrategyRequest() )); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void getVectorStoreFileBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String storeId = vectorStore.getId(); String fileId = fileIds.get(0); String fileId2 = uploadFile(client, "20220924_aapl_10k.pdf", ASSISTANTS); fileIds.add(fileId2); 
VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(storeId, Arrays.asList(fileId, fileId2)); String batchId = vectorStoreFileBatch.getId(); int totalFileCounts = vectorStoreFileBatch.getFileCounts().getTotal(); VectorStoreFileBatch vectorStoreFileBatchResponse = client.getVectorStoreFileBatch(storeId, batchId); assertEquals(storeId, vectorStoreFileBatchResponse.getVectorStoreId()); assertEquals(batchId, vectorStoreFileBatchResponse.getId()); assertEquals(totalFileCounts, vectorStoreFileBatchResponse.getFileCounts().getTotal()); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils @Disabled("This test is failing with 500. The server had an error processing your request. Sorry about that! " + "You can retry your request, or contact us through our help center at oai-assistants@microsoft.com if " + "you keep seeing this error.") public void listVectorStoreFilesBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String storeId = vectorStore.getId(); String fileId = fileIds.get(0); String fileId2 = uploadFile(client, "20220924_aapl_10k.pdf", ASSISTANTS); fileIds.add(fileId2); VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(storeId, Arrays.asList(fileId, fileId2)); PageableList<VectorStoreFile> vectorStoreFiles = client.listVectorStoreFileBatchFiles(storeId, vectorStoreFileBatch.getId()); assertNotNull(vectorStoreFiles); assertFalse(vectorStoreFiles.getData().isEmpty()); vectorStoreFiles.getData().forEach(vectorStoreFile -> { assertNotNull(vectorStoreFile.getId()); assertNotNull(vectorStoreFile.getCreatedAt()); }); } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.ai.openai.assistants.TestUtils public void cancelVectorStoreFileBatch(HttpClient httpClient, AssistantsServiceVersion serviceVersion) { beforeTest(httpClient, serviceVersion); String storeId = 
vectorStore.getId(); String fileId = fileIds.get(0); String fileId2 = uploadFile(client, "20220924_aapl_10k.pdf", ASSISTANTS); fileIds.add(fileId2); VectorStoreFileBatch vectorStoreFileBatch = client.createVectorStoreFileBatch(storeId, Arrays.asList(fileId, fileId2)); VectorStoreFileBatch cancelVectorStoreFileBatch = client.cancelVectorStoreFileBatch(storeId, vectorStoreFileBatch.getId()); while (VectorStoreFileBatchStatus.IN_PROGRESS == cancelVectorStoreFileBatch.getStatus()) { cancelVectorStoreFileBatch = client.getVectorStoreFileBatch(storeId, vectorStoreFileBatch.getId()); } assertNotNull(vectorStoreFileBatch); assertNotNull(cancelVectorStoreFileBatch); assertEquals(vectorStoreFileBatch.getId(), cancelVectorStoreFileBatch.getId()); assertEquals(vectorStoreFileBatch.getFileCounts().getTotal(), cancelVectorStoreFileBatch.getFileCounts().getTotal()); } }
Then I am wondering: do we need to introduce a new system environment variable, or is this new variable targeted only at tests?
// Builds a CosmosAsyncClient from this builder's configuration, optionally warming up
// connections/caches per the proactive container init config and logging startup info.
CosmosAsyncClient buildAsyncClient(boolean logStartupInfo) {
    StopWatch stopwatch = new StopWatch();
    stopwatch.start();
    // Fix: the original evaluated Configs.shouldOptInDefaultCircuitBreakerConfig() twice
    // in a redundantly nested if; a single check is sufficient and behaviorally identical.
    if (Configs.shouldOptInDefaultCircuitBreakerConfig()) {
        System.setProperty("COSMOS.PARTITION_LEVEL_CIRCUIT_BREAKER_CONFIG",
            "{\"isPartitionLevelCircuitBreakerEnabled\": true}");
    }
    this.resetSessionCapturingType();
    validateConfig();
    buildConnectionPolicy();
    CosmosAsyncClient cosmosAsyncClient = new CosmosAsyncClient(this);
    if (proactiveContainerInitConfig != null) {
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesStarted(
            proactiveContainerInitConfig.getCosmosContainerIdentities());
        Duration aggressiveWarmupDuration = proactiveContainerInitConfig.getAggressiveWarmupDuration();
        if (aggressiveWarmupDuration != null) {
            cosmosAsyncClient.openConnectionsAndInitCaches(aggressiveWarmupDuration);
        } else {
            cosmosAsyncClient.openConnectionsAndInitCaches();
        }
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(
            proactiveContainerInitConfig.getCosmosContainerIdentities());
    } else {
        // No proactive init requested: record completion against an empty identity list.
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(new ArrayList<>());
    }
    if (logStartupInfo) {
        logStartupInfo(stopwatch, cosmosAsyncClient);
    }
    return cosmosAsyncClient;
}
if (Configs.shouldOptInDefaultCircuitBreakerConfig()) {
// Constructs a CosmosAsyncClient from this builder's configuration, warming up
// connections/caches when a proactive container init config is present and
// optionally logging startup information.
CosmosAsyncClient buildAsyncClient(boolean logStartupInfo) {
    final StopWatch startupTimer = new StopWatch();
    startupTimer.start();
    if (Configs.shouldOptInDefaultCircuitBreakerConfig()) {
        System.setProperty("COSMOS.PARTITION_LEVEL_CIRCUIT_BREAKER_CONFIG",
            "{\"isPartitionLevelCircuitBreakerEnabled\": true}");
    }
    this.resetSessionCapturingType();
    validateConfig();
    buildConnectionPolicy();
    final CosmosAsyncClient asyncClient = new CosmosAsyncClient(this);
    if (proactiveContainerInitConfig == null) {
        // No proactive init requested: record completion against an empty identity list.
        asyncClient.recordOpenConnectionsAndInitCachesCompleted(new ArrayList<>());
    } else {
        asyncClient.recordOpenConnectionsAndInitCachesStarted(
            proactiveContainerInitConfig.getCosmosContainerIdentities());
        final Duration warmupDuration = proactiveContainerInitConfig.getAggressiveWarmupDuration();
        if (warmupDuration == null) {
            asyncClient.openConnectionsAndInitCaches();
        } else {
            asyncClient.openConnectionsAndInitCaches(warmupDuration);
        }
        asyncClient.recordOpenConnectionsAndInitCachesCompleted(
            proactiveContainerInitConfig.getCosmosContainerIdentities());
    }
    if (logStartupInfo) {
        logStartupInfo(startupTimer, asyncClient);
    }
    return asyncClient;
}
Class used to instantiate {@link CosmosContainerProactiveInitConfig}.
Class used to instantiate {@link CosmosContainerProactiveInitConfig}.
It is targeted only at tests — setting a JSON environment-variable value through the command line in `live-platform-matrix.json` never seemed to work, for some reason. This seemed like a simple compromise, and the newly added system property is hidden. Let me know if you disagree — I can investigate how to set it as JSON directly.
// Builds a CosmosAsyncClient from this builder's configuration, optionally warming up
// connections/caches per the proactive container init config and logging startup info.
CosmosAsyncClient buildAsyncClient(boolean logStartupInfo) {
    StopWatch stopwatch = new StopWatch();
    stopwatch.start();
    // Fix: the original evaluated Configs.shouldOptInDefaultCircuitBreakerConfig() twice
    // in a redundantly nested if; a single check is sufficient and behaviorally identical.
    if (Configs.shouldOptInDefaultCircuitBreakerConfig()) {
        System.setProperty("COSMOS.PARTITION_LEVEL_CIRCUIT_BREAKER_CONFIG",
            "{\"isPartitionLevelCircuitBreakerEnabled\": true}");
    }
    this.resetSessionCapturingType();
    validateConfig();
    buildConnectionPolicy();
    CosmosAsyncClient cosmosAsyncClient = new CosmosAsyncClient(this);
    if (proactiveContainerInitConfig != null) {
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesStarted(
            proactiveContainerInitConfig.getCosmosContainerIdentities());
        Duration aggressiveWarmupDuration = proactiveContainerInitConfig.getAggressiveWarmupDuration();
        if (aggressiveWarmupDuration != null) {
            cosmosAsyncClient.openConnectionsAndInitCaches(aggressiveWarmupDuration);
        } else {
            cosmosAsyncClient.openConnectionsAndInitCaches();
        }
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(
            proactiveContainerInitConfig.getCosmosContainerIdentities());
    } else {
        // No proactive init requested: record completion against an empty identity list.
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(new ArrayList<>());
    }
    if (logStartupInfo) {
        logStartupInfo(stopwatch, cosmosAsyncClient);
    }
    return cosmosAsyncClient;
}
if (Configs.shouldOptInDefaultCircuitBreakerConfig()) {
// Constructs a CosmosAsyncClient from this builder's configuration, warming up
// connections/caches when a proactive container init config is present and
// optionally logging startup information.
CosmosAsyncClient buildAsyncClient(boolean logStartupInfo) {
    final StopWatch startupTimer = new StopWatch();
    startupTimer.start();
    if (Configs.shouldOptInDefaultCircuitBreakerConfig()) {
        System.setProperty("COSMOS.PARTITION_LEVEL_CIRCUIT_BREAKER_CONFIG",
            "{\"isPartitionLevelCircuitBreakerEnabled\": true}");
    }
    this.resetSessionCapturingType();
    validateConfig();
    buildConnectionPolicy();
    final CosmosAsyncClient asyncClient = new CosmosAsyncClient(this);
    if (proactiveContainerInitConfig == null) {
        // No proactive init requested: record completion against an empty identity list.
        asyncClient.recordOpenConnectionsAndInitCachesCompleted(new ArrayList<>());
    } else {
        asyncClient.recordOpenConnectionsAndInitCachesStarted(
            proactiveContainerInitConfig.getCosmosContainerIdentities());
        final Duration warmupDuration = proactiveContainerInitConfig.getAggressiveWarmupDuration();
        if (warmupDuration == null) {
            asyncClient.openConnectionsAndInitCaches();
        } else {
            asyncClient.openConnectionsAndInitCaches(warmupDuration);
        }
        asyncClient.recordOpenConnectionsAndInitCachesCompleted(
            proactiveContainerInitConfig.getCosmosContainerIdentities());
    }
    if (logStartupInfo) {
        logStartupInfo(startupTimer, asyncClient);
    }
    return asyncClient;
}
Class used to instantiate {@link CosmosContainerProactiveInitConfig}.
Class used to instantiate {@link CosmosContainerProactiveInitConfig}.
Synced up offline - concluded to keep the change as is.
// Builds a CosmosAsyncClient from this builder's configuration, optionally warming up
// connections/caches per the proactive container init config and logging startup info.
CosmosAsyncClient buildAsyncClient(boolean logStartupInfo) {
    StopWatch stopwatch = new StopWatch();
    stopwatch.start();
    // Fix: the original evaluated Configs.shouldOptInDefaultCircuitBreakerConfig() twice
    // in a redundantly nested if; a single check is sufficient and behaviorally identical.
    if (Configs.shouldOptInDefaultCircuitBreakerConfig()) {
        System.setProperty("COSMOS.PARTITION_LEVEL_CIRCUIT_BREAKER_CONFIG",
            "{\"isPartitionLevelCircuitBreakerEnabled\": true}");
    }
    this.resetSessionCapturingType();
    validateConfig();
    buildConnectionPolicy();
    CosmosAsyncClient cosmosAsyncClient = new CosmosAsyncClient(this);
    if (proactiveContainerInitConfig != null) {
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesStarted(
            proactiveContainerInitConfig.getCosmosContainerIdentities());
        Duration aggressiveWarmupDuration = proactiveContainerInitConfig.getAggressiveWarmupDuration();
        if (aggressiveWarmupDuration != null) {
            cosmosAsyncClient.openConnectionsAndInitCaches(aggressiveWarmupDuration);
        } else {
            cosmosAsyncClient.openConnectionsAndInitCaches();
        }
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(
            proactiveContainerInitConfig.getCosmosContainerIdentities());
    } else {
        // No proactive init requested: record completion against an empty identity list.
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(new ArrayList<>());
    }
    if (logStartupInfo) {
        logStartupInfo(stopwatch, cosmosAsyncClient);
    }
    return cosmosAsyncClient;
}
if (Configs.shouldOptInDefaultCircuitBreakerConfig()) {
// Constructs a CosmosAsyncClient from this builder's configuration, warming up
// connections/caches when a proactive container init config is present and
// optionally logging startup information.
CosmosAsyncClient buildAsyncClient(boolean logStartupInfo) {
    final StopWatch startupTimer = new StopWatch();
    startupTimer.start();
    if (Configs.shouldOptInDefaultCircuitBreakerConfig()) {
        System.setProperty("COSMOS.PARTITION_LEVEL_CIRCUIT_BREAKER_CONFIG",
            "{\"isPartitionLevelCircuitBreakerEnabled\": true}");
    }
    this.resetSessionCapturingType();
    validateConfig();
    buildConnectionPolicy();
    final CosmosAsyncClient asyncClient = new CosmosAsyncClient(this);
    if (proactiveContainerInitConfig == null) {
        // No proactive init requested: record completion against an empty identity list.
        asyncClient.recordOpenConnectionsAndInitCachesCompleted(new ArrayList<>());
    } else {
        asyncClient.recordOpenConnectionsAndInitCachesStarted(
            proactiveContainerInitConfig.getCosmosContainerIdentities());
        final Duration warmupDuration = proactiveContainerInitConfig.getAggressiveWarmupDuration();
        if (warmupDuration == null) {
            asyncClient.openConnectionsAndInitCaches();
        } else {
            asyncClient.openConnectionsAndInitCaches(warmupDuration);
        }
        asyncClient.recordOpenConnectionsAndInitCachesCompleted(
            proactiveContainerInitConfig.getCosmosContainerIdentities());
    }
    if (logStartupInfo) {
        logStartupInfo(startupTimer, asyncClient);
    }
    return asyncClient;
}
class to instantiate {@link CosmosContainerProactiveInitConfig}
class to instantiate {@link CosmosContainerProactiveInitConfig}
Is there a reason we can't just use json instead of creating POJOs. It seems an unneeded step. And will currently break if any new fields are added.
private List<ValidationTestCase> readTestcasesFromFile(File testFile) throws IOException { final String jsonString = Files.readString(testFile.toPath()); final CollectionType typeReference = TypeFactory.defaultInstance().constructCollectionType(List.class, ValidationTestCase.class); return objectMapper.readValue(jsonString, typeReference); }
return objectMapper.readValue(jsonString, typeReference);
private List<ValidationTestCase> readTestcasesFromFile(File testFile) throws IOException { final String jsonString = Files.readString(testFile.toPath()); final CollectionType typeReference = TypeFactory.defaultInstance().constructCollectionType(List.class, ValidationTestCase.class); return OBJECT_MAPPER.readValue(jsonString, typeReference); }
class ValidationsTest { @Mock private ApplicationContext context; @Mock private FeatureManagementConfigProperties configProperties; private final ObjectMapper objectMapper = JsonMapper.builder() .configure(MapperFeature.ACCEPT_CASE_INSENSITIVE_PROPERTIES, true).build(); private final String testCaseFolderPath = "validations-tests"; private final String inputsUser = "user"; private final String inputsGroups = "groups"; private final String sampleFileNameFilter = "sample"; private final String testsFileNameFilter = "tests"; @BeforeEach public void setup() { MockitoAnnotations.openMocks(this); when(configProperties.isFailFast()).thenReturn(true); when(context.getBean(Mockito.contains("TimeWindow"))).thenReturn(new TimeWindowFilter()); } @AfterEach public void cleanup() throws Exception { MockitoAnnotations.openMocks(this).close(); } private boolean hasException(ValidationTestCase testCase) { final String exceptionStr = testCase.getIsEnabled().getException(); return exceptionStr != null && !exceptionStr.isEmpty(); } private boolean hasInput(ValidationTestCase testCase) { final LinkedHashMap<String, Object> inputsMap = testCase.getInputs(); return inputsMap != null && !inputsMap.isEmpty(); } private File[] getFileList(String fileNameFilter) { final URL folderUrl = Thread.currentThread().getContextClassLoader().getResource(testCaseFolderPath); assert folderUrl != null; final File folderFile = new File(folderUrl.getFile()); final File[] filteredFiles = folderFile.listFiles(pathname -> pathname.getName().toLowerCase().contains(fileNameFilter)); assert filteredFiles != null; Arrays.sort(filteredFiles, Comparator.comparing(File::getName)); return filteredFiles; } @SuppressWarnings("unchecked") private LinkedHashMap<String, Object> readConfigurationFromFile(File sampleFile) throws IOException { final String jsonString = Files.readString(sampleFile.toPath()); final LinkedHashMap<String, Object> configurations = objectMapper.readValue(jsonString, new TypeReference<>() { }); 
final Object featureManagementSection = configurations.get("feature_management"); if (featureManagementSection.getClass().isAssignableFrom(LinkedHashMap.class)) { return (LinkedHashMap<String, Object>) featureManagementSection; } return new LinkedHashMap<>(); } @SuppressWarnings("unchecked") private void runTestcases(File sampleFile, File testsFile) throws IOException { final FeatureManagementProperties managementProperties = new FeatureManagementProperties(); managementProperties.putAll(readConfigurationFromFile(sampleFile)); final FeatureManager featureManager = new FeatureManager(context, managementProperties, configProperties); final List<ValidationTestCase> testCases = readTestcasesFromFile(testsFile); System.out.println("Running test case from file: " + testsFile.getName()); for (int i = 0; i < testCases.size(); i++) { System.out.println("Test case " + i + " : " + testCases.get(i).getDescription()); if (hasException(testCases.get(i))) { assertNull(managementProperties.getOnOff().get(testCases.get(i).getFeatureFlagName())); } else { if (hasInput(testCases.get(i))) { final Object userObj = testCases.get(i).getInputs().get(inputsUser); final Object groupsObj = testCases.get(i).getInputs().get(inputsGroups); final String user = userObj != null ? userObj.toString() : null; final List<String> groups = groupsObj != null ? 
(List<String>) groupsObj : null; when(context.getBean(Mockito.contains("Targeting"))).thenReturn(new TargetingFilter(new TargetingFilterTestContextAccessor(user, groups))); } final Boolean result = featureManager.isEnabled(testCases.get(i).getFeatureFlagName()); assertEquals(result.toString(), testCases.get(i).getIsEnabled().getResult()); } } } @Test void validationsTest() throws IOException { final File[] sampleFiles = getFileList(sampleFileNameFilter); final File[] testsFiles = getFileList(testsFileNameFilter); if (sampleFiles.length != testsFiles.length) { throw new IllegalArgumentException("The sample files and tests files should have same count."); } for (int i = 0; i < sampleFiles.length; i++) { if (sampleFiles[i].getName().contains("TargetingFilter.sample")) { continue; } runTestcases(sampleFiles[i], testsFiles[i]); } } }
class ValidationsTest { @Mock private ApplicationContext context; @Mock private FeatureManagementConfigProperties configProperties; private static final Logger LOGGER = LoggerFactory.getLogger(ValidationsTest.class); private static final ObjectMapper OBJECT_MAPPER = JsonMapper.builder() .configure(MapperFeature.ACCEPT_CASE_INSENSITIVE_PROPERTIES, true).build(); private static final String TEST_CASE_FOLDER_PATH = "validations-tests"; private final String inputsUser = "user"; private final String inputsGroups = "groups"; private static final String SAMPLE_FILE_NAME_FILTER = "sample"; private static final String TESTS_FILE_NAME_FILTER = "tests"; @BeforeEach public void setup() { MockitoAnnotations.openMocks(this); when(configProperties.isFailFast()).thenReturn(true); when(context.getBean(Mockito.contains("TimeWindow"))).thenReturn(new TimeWindowFilter()); } @AfterEach public void cleanup() throws Exception { MockitoAnnotations.openMocks(this).close(); } private boolean hasException(ValidationTestCase testCase) { final String exceptionStr = testCase.getIsEnabled().getException(); return exceptionStr != null && !exceptionStr.isEmpty(); } private boolean hasInput(ValidationTestCase testCase) { final LinkedHashMap<String, Object> inputsMap = testCase.getInputs(); return inputsMap != null && !inputsMap.isEmpty(); } private static File[] getFileList(String fileNameFilter) { final URL folderUrl = Thread.currentThread().getContextClassLoader().getResource(TEST_CASE_FOLDER_PATH); assert folderUrl != null; final File folderFile = new File(folderUrl.getFile()); final File[] filteredFiles = folderFile .listFiles(pathname -> pathname.getName().toLowerCase().contains(fileNameFilter)); assert filteredFiles != null; Arrays.sort(filteredFiles, Comparator.comparing(File::getName)); return filteredFiles; } @SuppressWarnings("unchecked") private static LinkedHashMap<String, Object> readConfigurationFromFile(File sampleFile) throws IOException { final String jsonString = 
Files.readString(sampleFile.toPath()); final LinkedHashMap<String, Object> configurations = OBJECT_MAPPER.readValue(jsonString, new TypeReference<>() { }); final Object featureManagementSection = configurations.get("feature_management"); if (featureManagementSection.getClass().isAssignableFrom(LinkedHashMap.class)) { return (LinkedHashMap<String, Object>) featureManagementSection; } throw new IllegalArgumentException("feature_management part is not a map"); } static Stream<Arguments> testProvider() throws IOException { List<Arguments> arguments = new ArrayList<>(); File[] files = getFileList(TESTS_FILE_NAME_FILTER); final File[] sampleFiles = getFileList(SAMPLE_FILE_NAME_FILTER); List<FeatureManagementProperties> properties = new ArrayList<>(); for (File sampleFile : sampleFiles) { final FeatureManagementProperties managementProperties = new FeatureManagementProperties(); managementProperties.putAll(readConfigurationFromFile(sampleFile)); properties.add(managementProperties); } for (int i = 0; i < files.length; i++) { if (files[i].getName().contains(("TargetingFilter"))) { continue; } arguments.add(Arguments.of(files[i].getName(), files[i], properties.get(i))); } return arguments.stream(); } @ParameterizedTest(name = "{0}") @MethodSource("testProvider") void validationTest(String name, File testsFile, FeatureManagementProperties managementProperties) throws IOException { LOGGER.debug("Running test case from file: " + name); final FeatureManager featureManager = new FeatureManager(context, managementProperties, configProperties); List<ValidationTestCase> testCases = readTestcasesFromFile(testsFile); for (ValidationTestCase testCase : testCases) { LOGGER.debug("Test case : " + testCase.getDescription()); if (hasException(testCase)) { assertNull(managementProperties.getOnOff().get(testCase.getFeatureFlagName())); continue; } if (hasInput(testCase)) { final Object userObj = testCase.getInputs().get(inputsUser); final Object groupsObj = 
testCase.getInputs().get(inputsGroups); final String user = userObj != null ? userObj.toString() : null; @SuppressWarnings("unchecked") final List<String> groups = groupsObj != null ? (List<String>) groupsObj : null; when(context.getBean(Mockito.contains("Targeting"))) .thenReturn(new TargetingFilter(new TargetingFilterTestContextAccessor(user, groups))); } final Boolean result = featureManager.isEnabled(testCase.getFeatureFlagName()); assertEquals(result.toString(), testCase.getIsEnabled().getResult()); } } }
should we put these into the cases using the bootstrap context, instead of putting them in @BeforeEach section?
void beforeEach() { context = mock(ConfigurableBootstrapContext.class); TokenCredential tokenCredential = mock(TokenCredential.class); when(context.get(TokenCredential.class)).thenReturn(tokenCredential); processor = spy(new KeyVaultEnvironmentPostProcessor(new DeferredLogs(), null)); environment = new MockEnvironment(); propertySources = environment.getPropertySources(); SecretClient secretClient = mock(SecretClient.class); doReturn(secretClient).when(processor).buildSecretClient(any(AzureKeyVaultSecretProperties.class)); }
context = mock(ConfigurableBootstrapContext.class);
void beforeEach() { processor = spy(new KeyVaultEnvironmentPostProcessor(new DeferredLogs(), null)); environment = new MockEnvironment(); propertySources = environment.getPropertySources(); SecretClient secretClient = mock(SecretClient.class); doReturn(secretClient).when(processor).buildSecretClient(any(AzureKeyVaultSecretProperties.class)); }
class KeyVaultEnvironmentPostProcessorTests { private static final String NAME_0 = "name_0"; private static final String NAME_1 = "name_1"; private static final String ENDPOINT_0 = "https: private static final String ENDPOINT_1 = "https: private final SpringApplication application = new SpringApplication(); private KeyVaultEnvironmentPostProcessor processor; private MockEnvironment environment; private MutablePropertySources propertySources; private ConfigurableBootstrapContext context; @BeforeEach @Test void testContextRegister_withTokenCredentialRegistered() { when(context.isRegistered(TokenCredential.class)).thenReturn(true); processor = spy(new KeyVaultEnvironmentPostProcessor(new DeferredLogs(), context)); assertThrows(IllegalStateException.class, () -> processor.buildSecretClient(new AzureKeyVaultSecretProperties())); verify(context, times(1)).get(TokenCredential.class); } @Test void testContextRegister_withoutTokenCredentialRegistered() { when(context.isRegistered(TokenCredential.class)).thenReturn(false); processor = spy(new KeyVaultEnvironmentPostProcessor(new DeferredLogs(), context)); assertThrows(IllegalStateException.class, () -> processor.buildSecretClient(new AzureKeyVaultSecretProperties())); verify(context, never()).get(TokenCredential.class); } @Test void postProcessorHasConfiguredOrder() { final KeyVaultEnvironmentPostProcessor processor = new KeyVaultEnvironmentPostProcessor(new DeferredLogs(), null); assertEquals(processor.getOrder(), KeyVaultEnvironmentPostProcessor.ORDER); } @Test void insertSinglePropertySourceTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); 
processor.postProcessEnvironment(environment, application); assertTrue(propertySources.contains(NAME_0)); } @Test void insertMultiplePropertySourceTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].name", NAME_1); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].endpoint", ENDPOINT_1); processor.postProcessEnvironment(environment, application); assertTrue(propertySources.contains(NAME_0)); assertTrue(propertySources.contains(NAME_1)); } @Test void keyVaultClientNotExistInClassPathTest() { try (MockedStatic<ClassUtils> classUtils = mockStatic(ClassUtils.class)) { classUtils.when(() -> ClassUtils.isPresent("com.azure.security.keyvault.secrets.SecretClient", getClass().getClassLoader())) .thenReturn(false); environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); processor.postProcessEnvironment(environment, application); assertFalse(propertySources.contains(NAME_0)); } } @Test void disableAllPropertySourceTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "false"); 
environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].name", NAME_1); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].endpoint", ENDPOINT_1); processor.postProcessEnvironment(environment, application); assertFalse(propertySources.contains(NAME_0)); assertFalse(propertySources.contains(NAME_1)); } @Test void emptyPropertySourceListTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); processor.postProcessEnvironment(environment, application); assertEquals(1, propertySources.size()); } @Test void disableSpecificOnePropertySourceTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "false"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].name", NAME_1); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].endpoint", ENDPOINT_1); processor.postProcessEnvironment(environment, application); assertFalse(propertySources.contains(NAME_0)); assertTrue(propertySources.contains(NAME_1)); } @Test void enableByDefaultTest() { 
environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].name", NAME_1); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].endpoint", ENDPOINT_1); processor.postProcessEnvironment(environment, application); assertTrue(propertySources.contains(NAME_0)); assertTrue(propertySources.contains(NAME_1)); } @Test void endPointNotConfiguredTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].name", NAME_1); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].endpoint", ENDPOINT_1); processor.postProcessEnvironment(environment, application); assertFalse(propertySources.contains(NAME_0)); assertTrue(propertySources.contains(NAME_1)); } @Test void defaultPropertySourceNameTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].endpoint", ENDPOINT_1); processor.postProcessEnvironment(environment, application); 
assertTrue(propertySources.contains(processor.buildPropertySourceName(0))); assertTrue(propertySources.contains(processor.buildPropertySourceName(1))); } @Test void keyVaultPropertySourceHasHighestPriorityIfEnvironmentPropertySourceNotExistTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); processor.postProcessEnvironment(environment, application); Iterator<PropertySource<?>> iterator = propertySources.iterator(); assertEquals(NAME_0, iterator.next().getName()); assertTrue(iterator.hasNext()); } @Test void keyVaultPropertySourceHasLowerPriorityThanEnvironmentPropertySourceTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); propertySources.addFirst(new SystemEnvironmentPropertySource(SYSTEM_ENVIRONMENT_PROPERTY_SOURCE_NAME, Collections.emptyMap())); processor.postProcessEnvironment(environment, application); Iterator<PropertySource<?>> iterator = propertySources.iterator(); while (iterator.hasNext()) { PropertySource<?> propertySource = iterator.next(); if (SYSTEM_ENVIRONMENT_PROPERTY_SOURCE_NAME.equals(propertySource.getName())) { break; } } assertEquals(NAME_0, iterator.next().getName()); assertTrue(iterator.hasNext()); } @Test void keyVaultPropertySourceOrderTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); 
environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].name", NAME_1); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].endpoint", ENDPOINT_1); processor.postProcessEnvironment(environment, application); Iterator<PropertySource<?>> iterator = propertySources.iterator(); assertEquals(NAME_0, iterator.next().getName()); assertEquals(NAME_1, iterator.next().getName()); assertTrue(iterator.hasNext()); } @Test void globalPropertiesTakeEffectIfSpecificPropertiesNotSetTest() { final String globalHostname = "globalHostname"; final String globalApplicationId = "globalApplicationId"; final String globalTenantId = "globalTenantId"; final String globalUsername = "globalUsername"; final int globalMaxRetries = 1; environment.setProperty("spring.cloud.azure.client.application-id", globalApplicationId); environment.setProperty("spring.cloud.azure.credential.username", globalUsername); environment.setProperty("spring.cloud.azure.profile.tenant-id", globalTenantId); environment.setProperty("spring.cloud.azure.proxy.hostname", globalHostname); environment.setProperty("spring.cloud.azure.retry.fixed.max-retries", "" + globalMaxRetries); environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); 
AzureKeyVaultSecretProperties secretProperties = processor.loadProperties(environment); AzureKeyVaultPropertySourceProperties properties = secretProperties.getPropertySources().get(0); assertEquals(globalUsername, properties.getCredential().getUsername()); assertEquals(globalApplicationId, properties.getClient().getApplicationId()); assertEquals(globalTenantId, properties.getProfile().getTenantId()); assertEquals(globalHostname, properties.getProxy().getHostname()); assertEquals(globalMaxRetries, properties.getRetry().getFixed().getMaxRetries()); } @Test void specificPropertiesHasHigherPriorityThanGlobalPropertiesTest() { final String globalHostname = "globalHostname"; final String globalApplicationId = "globalApplicationId"; final String globalTenantId = "globalTenantId"; final String globalUsername = "globalUsername"; final int globalMaxRetries = 1; final String specificHostname = "specificHostname"; final String specificApplicationId = "specificApplicationId"; final String specificTenantId = "specificTenantId"; final String specificUsername = "specificUsername"; final int specificMaxRetries = 2; environment.setProperty("spring.cloud.azure.client.application-id", globalApplicationId); environment.setProperty("spring.cloud.azure.credential.username", globalUsername); environment.setProperty("spring.cloud.azure.profile.tenant-id", globalTenantId); environment.setProperty("spring.cloud.azure.proxy.hostname", globalHostname); environment.setProperty("spring.cloud.azure.retry.fixed.max-retries", "" + globalMaxRetries); environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); 
environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].client.application-id", specificApplicationId); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].credential.username", specificUsername); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].profile.tenant-id", specificTenantId); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].proxy.hostname", specificHostname); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].retry.fixed.max-retries", "" + specificMaxRetries); AzureKeyVaultSecretProperties secretProperties = processor.loadProperties(environment); AzureKeyVaultPropertySourceProperties properties = secretProperties.getPropertySources().get(0); assertEquals(specificUsername, properties.getCredential().getUsername()); assertEquals(specificApplicationId, properties.getClient().getApplicationId()); assertEquals(specificTenantId, properties.getProfile().getTenantId()); assertEquals(specificHostname, properties.getProxy().getHostname()); assertEquals(specificMaxRetries, properties.getRetry().getFixed().getMaxRetries()); } @Test void challengeResourceVerificationEnabledCanBeSetAsFalseTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].challenge-resource-verification-enabled", "false"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); AzureKeyVaultSecretProperties secretProperties = processor.loadProperties(environment); AzureKeyVaultPropertySourceProperties properties = secretProperties.getPropertySources().get(0); 
assertTrue(secretProperties.isChallengeResourceVerificationEnabled()); assertFalse(properties.isChallengeResourceVerificationEnabled()); } @Test void challengeResourceVerificationEnabledIsSetByDefaultTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); AzureKeyVaultSecretProperties secretProperties = processor.loadProperties(environment); AzureKeyVaultPropertySourceProperties properties = secretProperties.getPropertySources().get(0); assertTrue(secretProperties.isChallengeResourceVerificationEnabled()); assertTrue(properties.isChallengeResourceVerificationEnabled()); } @Disabled("Disable it to unblock Azure Dev Ops pipeline: https: @Test void buildKeyVaultPropertySourceWithExceptionTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); assertThrows(IllegalStateException.class, () -> new KeyVaultEnvironmentPostProcessor(new DeferredLogs(), null).postProcessEnvironment(environment, application)); } }
class KeyVaultEnvironmentPostProcessorTests { private static final String NAME_0 = "name_0"; private static final String NAME_1 = "name_1"; private static final String ENDPOINT_0 = "https: private static final String ENDPOINT_1 = "https: private final SpringApplication application = new SpringApplication(); private KeyVaultEnvironmentPostProcessor processor; private MockEnvironment environment; private MutablePropertySources propertySources; private ConfigurableBootstrapContext context; @BeforeEach @Test void testContextRegisterWithTokenCredentialRegistered() { context = mock(ConfigurableBootstrapContext.class); TokenCredential tokenCredential = mock(TokenCredential.class); when(context.get(TokenCredential.class)).thenReturn(tokenCredential); when(context.isRegistered(TokenCredential.class)).thenReturn(true); processor = spy(new KeyVaultEnvironmentPostProcessor(new DeferredLogs(), context)); AzureKeyVaultSecretProperties secretProperties = new AzureKeyVaultSecretProperties(); secretProperties.setEndpoint(ENDPOINT_0); processor.buildSecretClient(secretProperties); verify(context, times(1)).get(TokenCredential.class); } @Test void testContextRegisterWithoutTokenCredentialRegistered() { context = mock(ConfigurableBootstrapContext.class); TokenCredential tokenCredential = mock(TokenCredential.class); when(context.get(TokenCredential.class)).thenReturn(tokenCredential); when(context.isRegistered(TokenCredential.class)).thenReturn(false); processor = spy(new KeyVaultEnvironmentPostProcessor(new DeferredLogs(), context)); AzureKeyVaultSecretProperties secretProperties = new AzureKeyVaultSecretProperties(); secretProperties.setEndpoint(ENDPOINT_0); processor.buildSecretClient(secretProperties); verify(context, never()).get(TokenCredential.class); } @Test void postProcessorHasConfiguredOrder() { final KeyVaultEnvironmentPostProcessor processor = new KeyVaultEnvironmentPostProcessor(new DeferredLogs(), null); assertEquals(processor.getOrder(), 
KeyVaultEnvironmentPostProcessor.ORDER); } @Test void insertSinglePropertySourceTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); processor.postProcessEnvironment(environment, application); assertTrue(propertySources.contains(NAME_0)); } @Test void insertMultiplePropertySourceTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].name", NAME_1); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].endpoint", ENDPOINT_1); processor.postProcessEnvironment(environment, application); assertTrue(propertySources.contains(NAME_0)); assertTrue(propertySources.contains(NAME_1)); } @Test void keyVaultClientNotExistInClassPathTest() { try (MockedStatic<ClassUtils> classUtils = mockStatic(ClassUtils.class)) { classUtils.when(() -> ClassUtils.isPresent("com.azure.security.keyvault.secrets.SecretClient", getClass().getClassLoader())) .thenReturn(false); environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); 
environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); processor.postProcessEnvironment(environment, application); assertFalse(propertySources.contains(NAME_0)); } } @Test void disableAllPropertySourceTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "false"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].name", NAME_1); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].endpoint", ENDPOINT_1); processor.postProcessEnvironment(environment, application); assertFalse(propertySources.contains(NAME_0)); assertFalse(propertySources.contains(NAME_1)); } @Test void emptyPropertySourceListTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); processor.postProcessEnvironment(environment, application); assertEquals(1, propertySources.size()); } @Test void disableSpecificOnePropertySourceTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "false"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].enabled", "true"); 
environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].name", NAME_1); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].endpoint", ENDPOINT_1); processor.postProcessEnvironment(environment, application); assertFalse(propertySources.contains(NAME_0)); assertTrue(propertySources.contains(NAME_1)); } @Test void enableByDefaultTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].name", NAME_1); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].endpoint", ENDPOINT_1); processor.postProcessEnvironment(environment, application); assertTrue(propertySources.contains(NAME_0)); assertTrue(propertySources.contains(NAME_1)); } @Test void endPointNotConfiguredTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].name", NAME_1); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].endpoint", ENDPOINT_1); processor.postProcessEnvironment(environment, application); assertFalse(propertySources.contains(NAME_0)); assertTrue(propertySources.contains(NAME_1)); } @Test void defaultPropertySourceNameTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); 
environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].endpoint", ENDPOINT_1); processor.postProcessEnvironment(environment, application); assertTrue(propertySources.contains(processor.buildPropertySourceName(0))); assertTrue(propertySources.contains(processor.buildPropertySourceName(1))); } @Test void keyVaultPropertySourceHasHighestPriorityIfEnvironmentPropertySourceNotExistTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); processor.postProcessEnvironment(environment, application); Iterator<PropertySource<?>> iterator = propertySources.iterator(); assertEquals(NAME_0, iterator.next().getName()); assertTrue(iterator.hasNext()); } @Test void keyVaultPropertySourceHasLowerPriorityThanEnvironmentPropertySourceTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); propertySources.addFirst(new SystemEnvironmentPropertySource(SYSTEM_ENVIRONMENT_PROPERTY_SOURCE_NAME, Collections.emptyMap())); processor.postProcessEnvironment(environment, application); Iterator<PropertySource<?>> iterator = propertySources.iterator(); while (iterator.hasNext()) { 
PropertySource<?> propertySource = iterator.next(); if (SYSTEM_ENVIRONMENT_PROPERTY_SOURCE_NAME.equals(propertySource.getName())) { break; } } assertEquals(NAME_0, iterator.next().getName()); assertTrue(iterator.hasNext()); } @Test void keyVaultPropertySourceOrderTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].name", NAME_1); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[1].endpoint", ENDPOINT_1); processor.postProcessEnvironment(environment, application); Iterator<PropertySource<?>> iterator = propertySources.iterator(); assertEquals(NAME_0, iterator.next().getName()); assertEquals(NAME_1, iterator.next().getName()); assertTrue(iterator.hasNext()); } @Test void globalPropertiesTakeEffectIfSpecificPropertiesNotSetTest() { final String globalHostname = "globalHostname"; final String globalApplicationId = "globalApplicationId"; final String globalTenantId = "globalTenantId"; final String globalUsername = "globalUsername"; final int globalMaxRetries = 1; environment.setProperty("spring.cloud.azure.client.application-id", globalApplicationId); environment.setProperty("spring.cloud.azure.credential.username", globalUsername); environment.setProperty("spring.cloud.azure.profile.tenant-id", globalTenantId); environment.setProperty("spring.cloud.azure.proxy.hostname", globalHostname); environment.setProperty("spring.cloud.azure.retry.fixed.max-retries", "" + globalMaxRetries); 
environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); AzureKeyVaultSecretProperties secretProperties = processor.loadProperties(environment); AzureKeyVaultPropertySourceProperties properties = secretProperties.getPropertySources().get(0); assertEquals(globalUsername, properties.getCredential().getUsername()); assertEquals(globalApplicationId, properties.getClient().getApplicationId()); assertEquals(globalTenantId, properties.getProfile().getTenantId()); assertEquals(globalHostname, properties.getProxy().getHostname()); assertEquals(globalMaxRetries, properties.getRetry().getFixed().getMaxRetries()); } @Test void specificPropertiesHasHigherPriorityThanGlobalPropertiesTest() { final String globalHostname = "globalHostname"; final String globalApplicationId = "globalApplicationId"; final String globalTenantId = "globalTenantId"; final String globalUsername = "globalUsername"; final int globalMaxRetries = 1; final String specificHostname = "specificHostname"; final String specificApplicationId = "specificApplicationId"; final String specificTenantId = "specificTenantId"; final String specificUsername = "specificUsername"; final int specificMaxRetries = 2; environment.setProperty("spring.cloud.azure.client.application-id", globalApplicationId); environment.setProperty("spring.cloud.azure.credential.username", globalUsername); environment.setProperty("spring.cloud.azure.profile.tenant-id", globalTenantId); environment.setProperty("spring.cloud.azure.proxy.hostname", globalHostname); environment.setProperty("spring.cloud.azure.retry.fixed.max-retries", "" + globalMaxRetries); 
environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].client.application-id", specificApplicationId); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].credential.username", specificUsername); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].profile.tenant-id", specificTenantId); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].proxy.hostname", specificHostname); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].retry.fixed.max-retries", "" + specificMaxRetries); AzureKeyVaultSecretProperties secretProperties = processor.loadProperties(environment); AzureKeyVaultPropertySourceProperties properties = secretProperties.getPropertySources().get(0); assertEquals(specificUsername, properties.getCredential().getUsername()); assertEquals(specificApplicationId, properties.getClient().getApplicationId()); assertEquals(specificTenantId, properties.getProfile().getTenantId()); assertEquals(specificHostname, properties.getProxy().getHostname()); assertEquals(specificMaxRetries, properties.getRetry().getFixed().getMaxRetries()); } @Test void challengeResourceVerificationEnabledCanBeSetAsFalseTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].challenge-resource-verification-enabled", "false"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); 
environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); AzureKeyVaultSecretProperties secretProperties = processor.loadProperties(environment); AzureKeyVaultPropertySourceProperties properties = secretProperties.getPropertySources().get(0); assertTrue(secretProperties.isChallengeResourceVerificationEnabled()); assertFalse(properties.isChallengeResourceVerificationEnabled()); } @Test void challengeResourceVerificationEnabledIsSetByDefaultTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); AzureKeyVaultSecretProperties secretProperties = processor.loadProperties(environment); AzureKeyVaultPropertySourceProperties properties = secretProperties.getPropertySources().get(0); assertTrue(secretProperties.isChallengeResourceVerificationEnabled()); assertTrue(properties.isChallengeResourceVerificationEnabled()); } @Disabled("Disable it to unblock Azure Dev Ops pipeline: https: @Test void buildKeyVaultPropertySourceWithExceptionTest() { environment.setProperty("spring.cloud.azure.keyvault.secret.property-source-enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].enabled", "true"); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].name", NAME_0); environment.setProperty("spring.cloud.azure.keyvault.secret.property-sources[0].endpoint", ENDPOINT_0); assertThrows(IllegalStateException.class, () -> new KeyVaultEnvironmentPostProcessor(new DeferredLogs(), null).postProcessEnvironment(environment, application)); } }
Is this a new field introduced in the latest version? It was not there in previous versions.
public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { jsonWriter.writeStartObject(); jsonWriter.writeStringField("object", "embedding"); jsonWriter.writeIntField("index", promptIndex); if (embeddingBase64 != null) { jsonWriter.writeStringField("embedding", embeddingBase64); } else if (embedding != null) { jsonWriter.writeArrayField("embedding", embedding, JsonWriter::writeDouble); } return jsonWriter.writeEndObject(); }
jsonWriter.writeStringField("object", "embedding");
public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { jsonWriter.writeStartObject(); jsonWriter.writeIntField("index", promptIndex); if (embeddingBase64 != null) { jsonWriter.writeStringField("embedding", embeddingBase64); } else if (embeddingInFloat != null) { jsonWriter.writeArrayField("embedding", embeddingInFloat, JsonWriter::writeFloat); } return jsonWriter.writeEndObject(); }
class EmbeddingItem implements JsonSerializable<EmbeddingItem> { /* * List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. */ @Generated private final List<Double> embedding; private final String embeddingBase64; /** * Creates an instance of EmbeddingItem class. * * @param embeddingBase64 the embedding value to set. * @param promptIndex the promptIndex value to set. */ private EmbeddingItem(String embeddingBase64, List<Double> embedding, int promptIndex) { this.embeddingBase64 = embeddingBase64; this.promptIndex = promptIndex; this.embedding = embedding; } /** * Get the embedding property: List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. * * @return the embedding value. */ public List<Float> getEmbedding() { if (embedding != null) { return embedding.stream().map(Double::floatValue).collect(Collectors.toList()); } return convertBase64ToFloatList(embeddingBase64); } /** * Get the embedding property: List of embeddings value in base64 format for the input prompt. * * @return the embedding base64 encoded string. */ public String getEmbeddingAsString() { if (embeddingBase64 != null) { return embeddingBase64; } return convertFloatListToBase64(embedding.stream().map(Double::floatValue).collect(Collectors.toList())); } /* * Index of the prompt to which the EmbeddingItem corresponds. */ @Generated private final int promptIndex; /** * Get the promptIndex property: Index of the prompt to which the EmbeddingItem corresponds. * * @return the promptIndex value. */ @Generated public int getPromptIndex() { return this.promptIndex; } /** * {@inheritDoc} */ @Override /** * Reads an instance of EmbeddingItem from the JsonReader. * * @param jsonReader The JsonReader being read. * @return An instance of EmbeddingItem if the JsonReader was pointing to an instance of it, or null if it was * pointing to JSON null. 
* @throws IllegalStateException If the deserialized JSON object was missing any required properties. * @throws IOException If an error occurs while reading the EmbeddingItem. */ public static EmbeddingItem fromJson(JsonReader jsonReader) throws IOException { return jsonReader.readObject(reader -> { String embedding = null; List<Double> embeddingInDouble = null; int promptIndex = 0; while (reader.nextToken() != JsonToken.END_OBJECT) { String fieldName = reader.getFieldName(); reader.nextToken(); if ("embedding".equals(fieldName)) { JsonToken jsonToken = reader.currentToken(); if (jsonToken == JsonToken.STRING) { embedding = reader.getString(); } else if (jsonToken == JsonToken.START_ARRAY) { embeddingInDouble = reader.readArray(JsonReader::getDouble); } else { throw new IllegalStateException("Unexpected 'embedding' type found when deserializing" + " EmbeddingItem JSON object: " + jsonToken); } } else if ("index".equals(fieldName)) { promptIndex = reader.getInt(); } else { reader.skipChildren(); } } return new EmbeddingItem(embedding, embeddingInDouble, promptIndex); }); } }
class EmbeddingItem implements JsonSerializable<EmbeddingItem> { /** * Get the embedding property: List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. * * @return the embedding value. */ public List<Float> getEmbedding() { if (embeddingInFloat == null && embeddingBase64 != null) { embeddingInFloat = convertBase64ToFloatList(embeddingBase64); } return embeddingInFloat; } /** * Get the embedding property: List of embeddings value in base64 format for the input prompt. * * @return the embedding base64 encoded string. */ public String getEmbeddingAsString() { if (embeddingBase64 == null && embeddingInFloat != null) { embeddingBase64 = convertFloatListToBase64(embeddingInFloat); } return embeddingBase64; } /* * Index of the prompt to which the EmbeddingItem corresponds. */ @Generated private final int promptIndex; /** * Get the promptIndex property: Index of the prompt to which the EmbeddingItem corresponds. * * @return the promptIndex value. */ @Generated public int getPromptIndex() { return this.promptIndex; } /** * {@inheritDoc} */ @Override /** * Reads an instance of EmbeddingItem from the JsonReader. * * @param jsonReader The JsonReader being read. * @return An instance of EmbeddingItem if the JsonReader was pointing to an instance of it, or null if it was * pointing to JSON null. * @throws IllegalStateException If the deserialized JSON object was missing any required properties. * @throws IOException If an error occurs while reading the EmbeddingItem. 
*/ public static EmbeddingItem fromJson(JsonReader jsonReader) throws IOException { return jsonReader.readObject(reader -> { String embedding = null; List<Float> embeddingInFloat = null; int promptIndex = 0; while (reader.nextToken() != JsonToken.END_OBJECT) { String fieldName = reader.getFieldName(); reader.nextToken(); if ("embedding".equals(fieldName)) { JsonToken jsonToken = reader.currentToken(); if (jsonToken == JsonToken.STRING) { embedding = reader.getString(); } else if (jsonToken == JsonToken.START_ARRAY) { embeddingInFloat = reader.readArray(JsonReader::getFloat); } else { throw new IllegalStateException("Unexpected 'embedding' type found when deserializing" + " EmbeddingItem JSON object: " + jsonToken); } } else if ("index".equals(fieldName)) { promptIndex = reader.getInt(); } else { reader.skipChildren(); } } EmbeddingItem embeddingItem = new EmbeddingItem(embedding, promptIndex); embeddingItem.embeddingInFloat = embeddingInFloat; return embeddingItem; }); } /** * Creates an instance of EmbeddingItem class. * * @param embeddingBase64 the embedding value to set. * @param promptIndex the promptIndex value to set. */ private EmbeddingItem(String embeddingBase64, int promptIndex) { this.embeddingBase64 = embeddingBase64; this.promptIndex = promptIndex; } private List<Double> embedding; /* * List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. */ private List<Float> embeddingInFloat; /* * List of embeddings value in base64 format for the input prompt. */ private String embeddingBase64; }
If I didn't mock the TimeWindowFilter, then the feature filter will be null when run the following code: ``` FeatureFilter featureFilter = (FeatureFilter) context.getBean(filter.getName()); ``` I just add this mock to make sure it will return a time window filter instance. Does this sound reasonable?
public void setup() { MockitoAnnotations.openMocks(this); when(configProperties.isFailFast()).thenReturn(true); when(context.getBean(Mockito.contains("TimeWindow"))).thenReturn(new TimeWindowFilter()); }
when(context.getBean(Mockito.contains("TimeWindow"))).thenReturn(new TimeWindowFilter());
public void setup() { MockitoAnnotations.openMocks(this); when(configProperties.isFailFast()).thenReturn(true); when(context.getBean(Mockito.contains("TimeWindow"))).thenReturn(new TimeWindowFilter()); }
class ValidationsTest { @Mock private ApplicationContext context; @Mock private FeatureManagementConfigProperties configProperties; private final ObjectMapper objectMapper = JsonMapper.builder() .configure(MapperFeature.ACCEPT_CASE_INSENSITIVE_PROPERTIES, true).build(); private final String testCaseFolderPath = "validations-tests"; private final String inputsUser = "user"; private final String inputsGroups = "groups"; private final String sampleFileNameFilter = "sample"; private final String testsFileNameFilter = "tests"; @BeforeEach @AfterEach public void cleanup() throws Exception { MockitoAnnotations.openMocks(this).close(); } private boolean hasException(ValidationTestCase testCase) { final String exceptionStr = testCase.getIsEnabled().getException(); return exceptionStr != null && !exceptionStr.isEmpty(); } private boolean hasInput(ValidationTestCase testCase) { final LinkedHashMap<String, Object> inputsMap = testCase.getInputs(); return inputsMap != null && !inputsMap.isEmpty(); } private File[] getFileList(String fileNameFilter) { final URL folderUrl = Thread.currentThread().getContextClassLoader().getResource(testCaseFolderPath); assert folderUrl != null; final File folderFile = new File(folderUrl.getFile()); final File[] filteredFiles = folderFile.listFiles(pathname -> pathname.getName().toLowerCase().contains(fileNameFilter)); assert filteredFiles != null; Arrays.sort(filteredFiles, Comparator.comparing(File::getName)); return filteredFiles; } private List<ValidationTestCase> readTestcasesFromFile(File testFile) throws IOException { final String jsonString = Files.readString(testFile.toPath()); final CollectionType typeReference = TypeFactory.defaultInstance().constructCollectionType(List.class, ValidationTestCase.class); return objectMapper.readValue(jsonString, typeReference); } @SuppressWarnings("unchecked") private LinkedHashMap<String, Object> readConfigurationFromFile(File sampleFile) throws IOException { final String jsonString = 
Files.readString(sampleFile.toPath()); final LinkedHashMap<String, Object> configurations = objectMapper.readValue(jsonString, new TypeReference<>() { }); final Object featureManagementSection = configurations.get("feature_management"); if (featureManagementSection.getClass().isAssignableFrom(LinkedHashMap.class)) { return (LinkedHashMap<String, Object>) featureManagementSection; } return new LinkedHashMap<>(); } @SuppressWarnings("unchecked") private void runTestcases(File sampleFile, File testsFile) throws IOException { final FeatureManagementProperties managementProperties = new FeatureManagementProperties(); managementProperties.putAll(readConfigurationFromFile(sampleFile)); final FeatureManager featureManager = new FeatureManager(context, managementProperties, configProperties); final List<ValidationTestCase> testCases = readTestcasesFromFile(testsFile); System.out.println("Running test case from file: " + testsFile.getName()); for (int i = 0; i < testCases.size(); i++) { System.out.println("Test case " + i + " : " + testCases.get(i).getDescription()); if (hasException(testCases.get(i))) { assertNull(managementProperties.getOnOff().get(testCases.get(i).getFeatureFlagName())); } else { if (hasInput(testCases.get(i))) { final Object userObj = testCases.get(i).getInputs().get(inputsUser); final Object groupsObj = testCases.get(i).getInputs().get(inputsGroups); final String user = userObj != null ? userObj.toString() : null; final List<String> groups = groupsObj != null ? 
(List<String>) groupsObj : null; when(context.getBean(Mockito.contains("Targeting"))).thenReturn(new TargetingFilter(new TargetingFilterTestContextAccessor(user, groups))); } final Boolean result = featureManager.isEnabled(testCases.get(i).getFeatureFlagName()); assertEquals(result.toString(), testCases.get(i).getIsEnabled().getResult()); } } } @Test void validationsTest() throws IOException { final File[] sampleFiles = getFileList(sampleFileNameFilter); final File[] testsFiles = getFileList(testsFileNameFilter); if (sampleFiles.length != testsFiles.length) { throw new IllegalArgumentException("The sample files and tests files should have same count."); } for (int i = 0; i < sampleFiles.length; i++) { if (sampleFiles[i].getName().contains("TargetingFilter.sample")) { continue; } runTestcases(sampleFiles[i], testsFiles[i]); } } }
class ValidationsTest { @Mock private ApplicationContext context; @Mock private FeatureManagementConfigProperties configProperties; private static final Logger LOGGER = LoggerFactory.getLogger(ValidationsTest.class); private static final ObjectMapper OBJECT_MAPPER = JsonMapper.builder() .configure(MapperFeature.ACCEPT_CASE_INSENSITIVE_PROPERTIES, true).build(); private static final String TEST_CASE_FOLDER_PATH = "validations-tests"; private final String inputsUser = "user"; private final String inputsGroups = "groups"; private static final String SAMPLE_FILE_NAME_FILTER = "sample"; private static final String TESTS_FILE_NAME_FILTER = "tests"; @BeforeEach @AfterEach public void cleanup() throws Exception { MockitoAnnotations.openMocks(this).close(); } private boolean hasException(ValidationTestCase testCase) { final String exceptionStr = testCase.getIsEnabled().getException(); return exceptionStr != null && !exceptionStr.isEmpty(); } private boolean hasInput(ValidationTestCase testCase) { final LinkedHashMap<String, Object> inputsMap = testCase.getInputs(); return inputsMap != null && !inputsMap.isEmpty(); } private static File[] getFileList(String fileNameFilter) { final URL folderUrl = Thread.currentThread().getContextClassLoader().getResource(TEST_CASE_FOLDER_PATH); assert folderUrl != null; final File folderFile = new File(folderUrl.getFile()); final File[] filteredFiles = folderFile .listFiles(pathname -> pathname.getName().toLowerCase().contains(fileNameFilter)); assert filteredFiles != null; Arrays.sort(filteredFiles, Comparator.comparing(File::getName)); return filteredFiles; } private List<ValidationTestCase> readTestcasesFromFile(File testFile) throws IOException { final String jsonString = Files.readString(testFile.toPath()); final CollectionType typeReference = TypeFactory.defaultInstance().constructCollectionType(List.class, ValidationTestCase.class); return OBJECT_MAPPER.readValue(jsonString, typeReference); } @SuppressWarnings("unchecked") private 
static LinkedHashMap<String, Object> readConfigurationFromFile(File sampleFile) throws IOException { final String jsonString = Files.readString(sampleFile.toPath()); final LinkedHashMap<String, Object> configurations = OBJECT_MAPPER.readValue(jsonString, new TypeReference<>() { }); final Object featureManagementSection = configurations.get("feature_management"); if (featureManagementSection.getClass().isAssignableFrom(LinkedHashMap.class)) { return (LinkedHashMap<String, Object>) featureManagementSection; } throw new IllegalArgumentException("feature_management part is not a map"); } static Stream<Arguments> testProvider() throws IOException { List<Arguments> arguments = new ArrayList<>(); File[] files = getFileList(TESTS_FILE_NAME_FILTER); final File[] sampleFiles = getFileList(SAMPLE_FILE_NAME_FILTER); List<FeatureManagementProperties> properties = new ArrayList<>(); for (File sampleFile : sampleFiles) { final FeatureManagementProperties managementProperties = new FeatureManagementProperties(); managementProperties.putAll(readConfigurationFromFile(sampleFile)); properties.add(managementProperties); } for (int i = 0; i < files.length; i++) { if (files[i].getName().contains(("TargetingFilter"))) { continue; } arguments.add(Arguments.of(files[i].getName(), files[i], properties.get(i))); } return arguments.stream(); } @ParameterizedTest(name = "{0}") @MethodSource("testProvider") void validationTest(String name, File testsFile, FeatureManagementProperties managementProperties) throws IOException { LOGGER.debug("Running test case from file: " + name); final FeatureManager featureManager = new FeatureManager(context, managementProperties, configProperties); List<ValidationTestCase> testCases = readTestcasesFromFile(testsFile); for (ValidationTestCase testCase : testCases) { LOGGER.debug("Test case : " + testCase.getDescription()); if (hasException(testCase)) { assertNull(managementProperties.getOnOff().get(testCase.getFeatureFlagName())); continue; } if 
(hasInput(testCase)) { final Object userObj = testCase.getInputs().get(inputsUser); final Object groupsObj = testCase.getInputs().get(inputsGroups); final String user = userObj != null ? userObj.toString() : null; @SuppressWarnings("unchecked") final List<String> groups = groupsObj != null ? (List<String>) groupsObj : null; when(context.getBean(Mockito.contains("Targeting"))) .thenReturn(new TargetingFilter(new TargetingFilterTestContextAccessor(user, groups))); } final Boolean result = featureManager.isEnabled(testCase.getFeatureFlagName()); assertEquals(result.toString(), testCase.getIsEnabled().getResult()); } } }
```suggestion assertTrue(response.getDetectedLanguage().getConfidence() > 0.8); ```
public void breakSentenceWithAutoDetect() { BreakSentenceItem response = getTranslationClient().findSentenceBoundaries("hello world"); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertTrue(response.getDetectedLanguage().getConfidence() > 0.8); assertEquals(11, response.getSentencesLengths().get(0)); }
assertTrue(response.getDetectedLanguage().getConfidence() > 0.8);
public void breakSentenceWithAutoDetect() { BreakSentenceItem response = getTranslationClient().findSentenceBoundaries("hello world"); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertTrue(response.getDetectedLanguage().getConfidence() > 0.8); assertEquals(11, response.getSentencesLengths().get(0)); }
class BreakSentenceTests extends TextTranslationClientBase { @Test @Test public void breakSentenceWithLanguage() { String content = "Mi familia es muy muy bonita. no padre .mi madre es bonita y muy bajo . mi hermano es alto. Me gusta mi familia."; BreakSentenceItem response = getTranslationClient().findSentenceBoundaries(content, "es", null); int[] expectedLengths = new int[]{ 30, 42, 20, 20 }; for (int i = 0; i < expectedLengths.length; i++) { assertEquals(expectedLengths[i], response.getSentencesLengths().get(i)); } } @Test public void breakSentenceWithLanguageAndScript() { BreakSentenceItem response = getTranslationClient().findSentenceBoundaries("zhè shì gè cè shì。", "zh-Hans", "Latn"); assertEquals(18, response.getSentencesLengths().get(0)); } @Test public void breakSentenceWithMultipleLanguages() { ArrayList<String> content = new ArrayList<>(); content.add("hello world"); content.add("العالم هو مكان مثير جدا للاهتمام"); List<BreakSentenceItem> response = getTranslationClient().findSentenceBoundaries(content); assertEquals("en", response.get(0).getDetectedLanguage().getLanguage()); assertEquals("ar", response.get(1).getDetectedLanguage().getLanguage()); assertEquals(11, response.get(0).getSentencesLengths().get(0)); assertEquals(32, response.get(1).getSentencesLengths().get(0)); } }
class BreakSentenceTests extends TextTranslationClientBase { @Test @Test public void breakSentenceWithLanguage() { String content = "Mi familia es muy muy bonita. no padre .mi madre es bonita y muy bajo . mi hermano es alto. Me gusta mi familia."; BreakSentenceItem response = getTranslationClient().findSentenceBoundaries(content, "es", null); int[] expectedLengths = new int[]{ 30, 42, 20, 20 }; for (int i = 0; i < expectedLengths.length; i++) { assertEquals(expectedLengths[i], response.getSentencesLengths().get(i)); } } @Test public void breakSentenceWithLanguageAndScript() { BreakSentenceItem response = getTranslationClient().findSentenceBoundaries("zhè shì gè cè shì。", "zh-Hans", "Latn"); assertEquals(18, response.getSentencesLengths().get(0)); } @Test public void breakSentenceWithMultipleLanguages() { ArrayList<String> content = new ArrayList<>(); content.add("hello world"); content.add("العالم هو مكان مثير جدا للاهتمام"); List<BreakSentenceItem> response = getTranslationClient().findSentenceBoundaries(content); assertEquals("en", response.get(0).getDetectedLanguage().getLanguage()); assertEquals("ar", response.get(1).getDetectedLanguage().getLanguage()); assertEquals(11, response.get(0).getSentencesLengths().get(0)); assertEquals(32, response.get(1).getSentencesLengths().get(0)); } }
You can use the source text `shit this is fucking crazy shit fuck`.
public void translateWithProfanity() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("zh-Hans") .setProfanityAction(ProfanityAction.MARKED) .setProfanityMarker(ProfanityMarker.ASTERISK); TranslatedTextItem response = getTranslationClient().translate("shit this is fucking crazy", translateOptions); assertEquals(1, response.getTranslations().size()); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); }
public void translateWithProfanity() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("zh-Hans") .setProfanityAction(ProfanityAction.MARKED) .setProfanityMarker(ProfanityMarker.ASTERISK); TranslatedTextItem response = getTranslationClient().translate("shit this is fucking crazy shit fuck", translateOptions); assertEquals(1, response.getTranslations().size()); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); assertTrue(response.getTranslations().get(0).getText().contains("***")); }
class TranslateTests extends TextTranslationClientBase { @Test @LiveOnly public void translateBasic() { TranslatedTextItem response = getTranslationClient().translate("cs", "Hola mundo"); assertEquals(1, response.getTranslations().size()); assertEquals("cs", response.getTranslations().get(0).getTargetLanguage()); assertNotNull(response.getTranslations().get(0).getText()); } @Test @LiveOnly public void translateOneItemWithOptions() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); TranslatedTextItem response = getTranslationClient().translate("Hola mundo", translateOptions); assertEquals(1, response.getTranslations().size()); assertEquals("cs", response.getTranslations().get(0).getTargetLanguage()); assertNotNull(response.getTranslations().get(0).getText()); } @Test @LiveOnly public void translateMultipleItemsWithOptions() { ArrayList<String> content = new ArrayList<>(); content.add("This is a test."); content.add("This is a test sentence two."); content.add("This is another test."); TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); List<TranslatedTextItem> response = getTranslationClient().translate(content, translateOptions); assertEquals(1, response.get(0).getTranslations().size()); assertEquals("cs", response.get(0).getTranslations().get(0).getTargetLanguage()); assertNotNull(response.get(0).getTranslations().get(0).getText()); } @Test @LiveOnly public void translateWithAutoDetect() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); TranslatedTextItem response = getTranslationClient().translate("This is a test.", translateOptions); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getTranslations().size()); assertEquals("cs", response.getTranslations().get(0).getTargetLanguage()); assertNotNull(response.getTranslations().get(0).getText()); } @Test @LiveOnly public void translateWithNoTranslateTag() { 
TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("en") .setSourceLanguage("zh-Hans") .setTextType(TextType.HTML); TranslatedTextItem response = getTranslationClient().translate("<span class=notranslate>今天是怎么回事是</span>非常可怕的", translateOptions); assertEquals(1, response.getTranslations().size()); assertTrue(response.getTranslations().get(0).getText().contains("今天是怎么回事是")); } @Test @LiveOnly public void translateWithDictionaryTag() { TranslateOptions translateOptions = new TranslateOptions() .setSourceLanguage("en") .addTargetLanguage("es"); TranslatedTextItem response = getTranslationClient().translate("The word < mstrans:dictionary translation =\"wordomatic\">wordomatic</mstrans:dictionary> is a dictionary entry.", translateOptions); assertEquals(1, response.getTranslations().size()); assertEquals("es", response.getTranslations().get(0).getTargetLanguage()); assertTrue(response.getTranslations().get(0).getText().contains("wordomatic")); } @Test @LiveOnly public void translateWithTransliteration() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("zh-Hans") .setSourceLanguage("ar") .setSourceLanguageScript("Latn") .setTargetLanguageScript("Latn"); TranslatedTextItem response = getTranslationClient().translate("hudha akhtabar.", translateOptions); assertNotNull(response.getSourceText().getText()); assertEquals("zh-Hans", response.getTranslations().get(0).getTargetLanguage()); assertNotNull(response.getTranslations().get(0).getText()); } @Test public void translateFromLatinToLatinScript() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("ta") .setSourceLanguage("hi") .setSourceLanguageScript("Latn") .setTargetLanguageScript("Latn"); TranslatedTextItem response = getTranslationClient().translate("ap kaise ho", translateOptions); assertNotNull(response.getTranslations().get(0).getTransliteration().getScript()); assertEquals("eppadi irukkiraai?", 
response.getTranslations().get(0).getTransliteration().getText()); } @Test public void translateWithMultipleInputTexts() { ArrayList<String> content = new ArrayList<>(); content.add("This is a test."); content.add("Esto es una prueba."); content.add("Dies ist ein Test."); TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); List<TranslatedTextItem> response = getTranslationClient().translate(content, translateOptions); assertEquals(3, response.size()); assertEquals("en", response.get(0).getDetectedLanguage().getLanguage()); assertEquals("es", response.get(1).getDetectedLanguage().getLanguage()); assertEquals("de", response.get(2).getDetectedLanguage().getLanguage()); assertEquals(1, response.get(0).getDetectedLanguage().getConfidence()); assertEquals(1, response.get(1).getDetectedLanguage().getConfidence()); assertEquals(1, response.get(2).getDetectedLanguage().getConfidence()); assertNotNull(response.get(0).getTranslations().get(0).getText()); assertNotNull(response.get(1).getTranslations().get(0).getText()); assertNotNull(response.get(2).getTranslations().get(0).getText()); } @Test public void translateMultipleTargetLanguages() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs") .addTargetLanguage("es") .addTargetLanguage("de"); TranslatedTextItem response = getTranslationClient().translate("This is a test.", translateOptions); assertEquals(3, response.getTranslations().size()); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); assertNotNull(response.getTranslations().get(0).getText()); assertNotNull(response.getTranslations().get(1).getText()); assertNotNull(response.getTranslations().get(2).getText()); } @Test public void translateDifferentTextTypes() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs") .setTextType(TextType.HTML); TranslatedTextItem response = 
getTranslationClient().translate("<html><body>This <b>is</b> a test.</body></html>", translateOptions); assertEquals(1, response.getTranslations().size()); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); } @Test @Test public void translateWithAlignment() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs") .setIncludeAlignment(true); TranslatedTextItem response = getTranslationClient().translate("It is a beautiful morning", translateOptions); assertEquals(1, response.getTranslations().size()); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); assertNotNull(response.getTranslations().get(0).getAlignment().getProjections()); } @Test public void translateWithIncludeSentenceLength() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("fr") .setIncludeSentenceLength(true); TranslatedTextItem response = getTranslationClient().translate("La réponse se trouve dans la traduction automatique. La meilleure technologie de traduction automatique ne peut pas toujours fournir des traductions adaptées à un site ou des utilisateurs comme un être humain. 
Il suffit de copier et coller un extrait de code n'importe où.", translateOptions); assertEquals("fr", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); assertEquals(1, response.getTranslations().size()); assertEquals(3, response.getTranslations().get(0).getSentenceBoundaries().getSourceSentencesLengths().size()); assertEquals(3, response.getTranslations().get(0).getSentenceBoundaries().getTranslatedSentencesLengths().size()); } @Test public void translateWithCustomEndpoint() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); TranslatedTextItem response = getTranslationClientWithCustomEndpoint().translate("It is a beautiful morning", translateOptions); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); assertEquals(1, response.getTranslations().size()); assertNotNull(response.getTranslations().get(0).getText()); } @Test public void translateWithToken() throws Exception { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); TranslatedTextItem response = getTranslationClientWithToken().translate("This is a test.", translateOptions); assertNotNull(response.getTranslations().get(0).getText()); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); assertEquals(1, response.getTranslations().size()); assertNotNull(response.getTranslations().get(0).getText()); } @Test @PlaybackOnly public void translateWithAad() throws Exception { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); TranslatedTextItem response = getTranslationClientWithAadAuth().translate("This is a test.", translateOptions); assertNotNull(response.getTranslations().get(0).getText()); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, 
response.getDetectedLanguage().getConfidence()); assertEquals(1, response.getTranslations().size()); assertNotNull(response.getTranslations().get(0).getText()); } }
class TranslateTests extends TextTranslationClientBase { @Test @LiveOnly public void translateBasic() { TranslatedTextItem response = getTranslationClient().translate("cs", "Hola mundo"); assertEquals(1, response.getTranslations().size()); assertEquals("cs", response.getTranslations().get(0).getTargetLanguage()); assertNotNull(response.getTranslations().get(0).getText()); } @Test @LiveOnly public void translateOneItemWithOptions() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); TranslatedTextItem response = getTranslationClient().translate("Hola mundo", translateOptions); assertEquals(1, response.getTranslations().size()); assertEquals("cs", response.getTranslations().get(0).getTargetLanguage()); assertNotNull(response.getTranslations().get(0).getText()); } @Test @LiveOnly public void translateMultipleItemsWithOptions() { ArrayList<String> content = new ArrayList<>(); content.add("This is a test."); content.add("This is a test sentence two."); content.add("This is another test."); TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); List<TranslatedTextItem> response = getTranslationClient().translate(content, translateOptions); assertEquals(1, response.get(0).getTranslations().size()); assertEquals("cs", response.get(0).getTranslations().get(0).getTargetLanguage()); assertNotNull(response.get(0).getTranslations().get(0).getText()); } @Test @LiveOnly public void translateWithAutoDetect() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); TranslatedTextItem response = getTranslationClient().translate("This is a test.", translateOptions); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getTranslations().size()); assertEquals("cs", response.getTranslations().get(0).getTargetLanguage()); assertNotNull(response.getTranslations().get(0).getText()); } @Test @LiveOnly public void translateWithNoTranslateTag() { 
TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("en") .setSourceLanguage("zh-Hans") .setTextType(TextType.HTML); TranslatedTextItem response = getTranslationClient().translate("<span class=notranslate>今天是怎么回事是</span>非常可怕的", translateOptions); assertEquals(1, response.getTranslations().size()); assertTrue(response.getTranslations().get(0).getText().contains("今天是怎么回事是")); } @Test @LiveOnly public void translateWithDictionaryTag() { TranslateOptions translateOptions = new TranslateOptions() .setSourceLanguage("en") .addTargetLanguage("es"); TranslatedTextItem response = getTranslationClient().translate("The word < mstrans:dictionary translation =\"wordomatic\">wordomatic</mstrans:dictionary> is a dictionary entry.", translateOptions); assertEquals(1, response.getTranslations().size()); assertEquals("es", response.getTranslations().get(0).getTargetLanguage()); assertTrue(response.getTranslations().get(0).getText().contains("wordomatic")); } @Test @LiveOnly public void translateWithTransliteration() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("zh-Hans") .setSourceLanguage("ar") .setSourceLanguageScript("Latn") .setTargetLanguageScript("Latn"); TranslatedTextItem response = getTranslationClient().translate("hudha akhtabar.", translateOptions); assertNotNull(response.getSourceText().getText()); assertEquals("zh-Hans", response.getTranslations().get(0).getTargetLanguage()); assertNotNull(response.getTranslations().get(0).getText()); } @Test public void translateFromLatinToLatinScript() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("ta") .setSourceLanguage("hi") .setSourceLanguageScript("Latn") .setTargetLanguageScript("Latn"); TranslatedTextItem response = getTranslationClient().translate("ap kaise ho", translateOptions); assertNotNull(response.getTranslations().get(0).getTransliteration().getScript()); assertEquals("eppadi irukkiraai?", 
response.getTranslations().get(0).getTransliteration().getText()); } @Test public void translateWithMultipleInputTexts() { ArrayList<String> content = new ArrayList<>(); content.add("This is a test."); content.add("Esto es una prueba."); content.add("Dies ist ein Test."); TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); List<TranslatedTextItem> response = getTranslationClient().translate(content, translateOptions); assertEquals(3, response.size()); assertEquals("en", response.get(0).getDetectedLanguage().getLanguage()); assertEquals("es", response.get(1).getDetectedLanguage().getLanguage()); assertEquals("de", response.get(2).getDetectedLanguage().getLanguage()); assertEquals(1, response.get(0).getDetectedLanguage().getConfidence()); assertEquals(1, response.get(1).getDetectedLanguage().getConfidence()); assertEquals(1, response.get(2).getDetectedLanguage().getConfidence()); assertNotNull(response.get(0).getTranslations().get(0).getText()); assertNotNull(response.get(1).getTranslations().get(0).getText()); assertNotNull(response.get(2).getTranslations().get(0).getText()); } @Test public void translateMultipleTargetLanguages() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs") .addTargetLanguage("es") .addTargetLanguage("de"); TranslatedTextItem response = getTranslationClient().translate("This is a test.", translateOptions); assertEquals(3, response.getTranslations().size()); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); assertNotNull(response.getTranslations().get(0).getText()); assertNotNull(response.getTranslations().get(1).getText()); assertNotNull(response.getTranslations().get(2).getText()); } @Test public void translateDifferentTextTypes() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs") .setTextType(TextType.HTML); TranslatedTextItem response = 
getTranslationClient().translate("<html><body>This <b>is</b> a test.</body></html>", translateOptions); assertEquals(1, response.getTranslations().size()); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); } @Test @Test public void translateWithAlignment() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs") .setIncludeAlignment(true); TranslatedTextItem response = getTranslationClient().translate("It is a beautiful morning", translateOptions); assertEquals(1, response.getTranslations().size()); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); assertNotNull(response.getTranslations().get(0).getAlignment().getProjections()); } @Test public void translateWithIncludeSentenceLength() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("fr") .setIncludeSentenceLength(true); TranslatedTextItem response = getTranslationClient().translate("La réponse se trouve dans la traduction automatique. La meilleure technologie de traduction automatique ne peut pas toujours fournir des traductions adaptées à un site ou des utilisateurs comme un être humain. 
Il suffit de copier et coller un extrait de code n'importe où.", translateOptions); assertEquals("fr", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); assertEquals(1, response.getTranslations().size()); assertEquals(3, response.getTranslations().get(0).getSentenceBoundaries().getSourceSentencesLengths().size()); assertEquals(3, response.getTranslations().get(0).getSentenceBoundaries().getTranslatedSentencesLengths().size()); } @Test public void translateWithCustomEndpoint() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); TranslatedTextItem response = getTranslationClientWithCustomEndpoint().translate("It is a beautiful morning", translateOptions); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); assertEquals(1, response.getTranslations().size()); assertNotNull(response.getTranslations().get(0).getText()); } @Test public void translateWithToken() throws Exception { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); TranslatedTextItem response = getTranslationClientWithToken().translate("This is a test.", translateOptions); assertNotNull(response.getTranslations().get(0).getText()); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); assertEquals(1, response.getTranslations().size()); assertNotNull(response.getTranslations().get(0).getText()); } @Test @PlaybackOnly public void translateWithAad() throws Exception { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); TranslatedTextItem response = getTranslationClientWithAadAuth().translate("This is a test.", translateOptions); assertNotNull(response.getTranslations().get(0).getText()); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, 
response.getDetectedLanguage().getConfidence()); assertEquals(1, response.getTranslations().size()); assertNotNull(response.getTranslations().get(0).getText()); } }
Updated, here are the live test pipeline results: https://dev.azure.com/azure-sdk/internal/_build/results?buildId=4098377&view=ms.vss-test-web.build-test-results-tab
public void breakSentenceWithAutoDetect() { BreakSentenceItem response = getTranslationClient().findSentenceBoundaries("hello world"); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertTrue(response.getDetectedLanguage().getConfidence() > 0.8); assertEquals(11, response.getSentencesLengths().get(0)); }
assertTrue(response.getDetectedLanguage().getConfidence() > 0.8);
public void breakSentenceWithAutoDetect() { BreakSentenceItem response = getTranslationClient().findSentenceBoundaries("hello world"); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertTrue(response.getDetectedLanguage().getConfidence() > 0.8); assertEquals(11, response.getSentencesLengths().get(0)); }
class BreakSentenceTests extends TextTranslationClientBase { @Test @Test public void breakSentenceWithLanguage() { String content = "Mi familia es muy muy bonita. no padre .mi madre es bonita y muy bajo . mi hermano es alto. Me gusta mi familia."; BreakSentenceItem response = getTranslationClient().findSentenceBoundaries(content, "es", null); int[] expectedLengths = new int[]{ 30, 42, 20, 20 }; for (int i = 0; i < expectedLengths.length; i++) { assertEquals(expectedLengths[i], response.getSentencesLengths().get(i)); } } @Test public void breakSentenceWithLanguageAndScript() { BreakSentenceItem response = getTranslationClient().findSentenceBoundaries("zhè shì gè cè shì。", "zh-Hans", "Latn"); assertEquals(18, response.getSentencesLengths().get(0)); } @Test public void breakSentenceWithMultipleLanguages() { ArrayList<String> content = new ArrayList<>(); content.add("hello world"); content.add("العالم هو مكان مثير جدا للاهتمام"); List<BreakSentenceItem> response = getTranslationClient().findSentenceBoundaries(content); assertEquals("en", response.get(0).getDetectedLanguage().getLanguage()); assertEquals("ar", response.get(1).getDetectedLanguage().getLanguage()); assertEquals(11, response.get(0).getSentencesLengths().get(0)); assertEquals(32, response.get(1).getSentencesLengths().get(0)); } }
class BreakSentenceTests extends TextTranslationClientBase { @Test @Test public void breakSentenceWithLanguage() { String content = "Mi familia es muy muy bonita. no padre .mi madre es bonita y muy bajo . mi hermano es alto. Me gusta mi familia."; BreakSentenceItem response = getTranslationClient().findSentenceBoundaries(content, "es", null); int[] expectedLengths = new int[]{ 30, 42, 20, 20 }; for (int i = 0; i < expectedLengths.length; i++) { assertEquals(expectedLengths[i], response.getSentencesLengths().get(i)); } } @Test public void breakSentenceWithLanguageAndScript() { BreakSentenceItem response = getTranslationClient().findSentenceBoundaries("zhè shì gè cè shì。", "zh-Hans", "Latn"); assertEquals(18, response.getSentencesLengths().get(0)); } @Test public void breakSentenceWithMultipleLanguages() { ArrayList<String> content = new ArrayList<>(); content.add("hello world"); content.add("العالم هو مكان مثير جدا للاهتمام"); List<BreakSentenceItem> response = getTranslationClient().findSentenceBoundaries(content); assertEquals("en", response.get(0).getDetectedLanguage().getLanguage()); assertEquals("ar", response.get(1).getDetectedLanguage().getLanguage()); assertEquals(11, response.get(0).getSentencesLengths().get(0)); assertEquals(32, response.get(1).getSentencesLengths().get(0)); } }
new field removed.
public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { jsonWriter.writeStartObject(); jsonWriter.writeStringField("object", "embedding"); jsonWriter.writeIntField("index", promptIndex); if (embeddingBase64 != null) { jsonWriter.writeStringField("embedding", embeddingBase64); } else if (embedding != null) { jsonWriter.writeArrayField("embedding", embedding, JsonWriter::writeDouble); } return jsonWriter.writeEndObject(); }
jsonWriter.writeStringField("object", "embedding");
public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { jsonWriter.writeStartObject(); jsonWriter.writeIntField("index", promptIndex); if (embeddingBase64 != null) { jsonWriter.writeStringField("embedding", embeddingBase64); } else if (embeddingInFloat != null) { jsonWriter.writeArrayField("embedding", embeddingInFloat, JsonWriter::writeFloat); } return jsonWriter.writeEndObject(); }
class EmbeddingItem implements JsonSerializable<EmbeddingItem> { /* * List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. */ @Generated private final List<Double> embedding; private final String embeddingBase64; /** * Creates an instance of EmbeddingItem class. * * @param embeddingBase64 the embedding value to set. * @param promptIndex the promptIndex value to set. */ private EmbeddingItem(String embeddingBase64, List<Double> embedding, int promptIndex) { this.embeddingBase64 = embeddingBase64; this.promptIndex = promptIndex; this.embedding = embedding; } /** * Get the embedding property: List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. * * @return the embedding value. */ public List<Float> getEmbedding() { if (embedding != null) { return embedding.stream().map(Double::floatValue).collect(Collectors.toList()); } return convertBase64ToFloatList(embeddingBase64); } /** * Get the embedding property: List of embeddings value in base64 format for the input prompt. * * @return the embedding base64 encoded string. */ public String getEmbeddingAsString() { if (embeddingBase64 != null) { return embeddingBase64; } return convertFloatListToBase64(embedding.stream().map(Double::floatValue).collect(Collectors.toList())); } /* * Index of the prompt to which the EmbeddingItem corresponds. */ @Generated private final int promptIndex; /** * Get the promptIndex property: Index of the prompt to which the EmbeddingItem corresponds. * * @return the promptIndex value. */ @Generated public int getPromptIndex() { return this.promptIndex; } /** * {@inheritDoc} */ @Override /** * Reads an instance of EmbeddingItem from the JsonReader. * * @param jsonReader The JsonReader being read. * @return An instance of EmbeddingItem if the JsonReader was pointing to an instance of it, or null if it was * pointing to JSON null. 
* @throws IllegalStateException If the deserialized JSON object was missing any required properties. * @throws IOException If an error occurs while reading the EmbeddingItem. */ public static EmbeddingItem fromJson(JsonReader jsonReader) throws IOException { return jsonReader.readObject(reader -> { String embedding = null; List<Double> embeddingInDouble = null; int promptIndex = 0; while (reader.nextToken() != JsonToken.END_OBJECT) { String fieldName = reader.getFieldName(); reader.nextToken(); if ("embedding".equals(fieldName)) { JsonToken jsonToken = reader.currentToken(); if (jsonToken == JsonToken.STRING) { embedding = reader.getString(); } else if (jsonToken == JsonToken.START_ARRAY) { embeddingInDouble = reader.readArray(JsonReader::getDouble); } else { throw new IllegalStateException("Unexpected 'embedding' type found when deserializing" + " EmbeddingItem JSON object: " + jsonToken); } } else if ("index".equals(fieldName)) { promptIndex = reader.getInt(); } else { reader.skipChildren(); } } return new EmbeddingItem(embedding, embeddingInDouble, promptIndex); }); } }
class EmbeddingItem implements JsonSerializable<EmbeddingItem> { /** * Get the embedding property: List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. * * @return the embedding value. */ public List<Float> getEmbedding() { if (embeddingInFloat == null && embeddingBase64 != null) { embeddingInFloat = convertBase64ToFloatList(embeddingBase64); } return embeddingInFloat; } /** * Get the embedding property: List of embeddings value in base64 format for the input prompt. * * @return the embedding base64 encoded string. */ public String getEmbeddingAsString() { if (embeddingBase64 == null && embeddingInFloat != null) { embeddingBase64 = convertFloatListToBase64(embeddingInFloat); } return embeddingBase64; } /* * Index of the prompt to which the EmbeddingItem corresponds. */ @Generated private final int promptIndex; /** * Get the promptIndex property: Index of the prompt to which the EmbeddingItem corresponds. * * @return the promptIndex value. */ @Generated public int getPromptIndex() { return this.promptIndex; } /** * {@inheritDoc} */ @Override /** * Reads an instance of EmbeddingItem from the JsonReader. * * @param jsonReader The JsonReader being read. * @return An instance of EmbeddingItem if the JsonReader was pointing to an instance of it, or null if it was * pointing to JSON null. * @throws IllegalStateException If the deserialized JSON object was missing any required properties. * @throws IOException If an error occurs while reading the EmbeddingItem. 
*/ public static EmbeddingItem fromJson(JsonReader jsonReader) throws IOException { return jsonReader.readObject(reader -> { String embedding = null; List<Float> embeddingInFloat = null; int promptIndex = 0; while (reader.nextToken() != JsonToken.END_OBJECT) { String fieldName = reader.getFieldName(); reader.nextToken(); if ("embedding".equals(fieldName)) { JsonToken jsonToken = reader.currentToken(); if (jsonToken == JsonToken.STRING) { embedding = reader.getString(); } else if (jsonToken == JsonToken.START_ARRAY) { embeddingInFloat = reader.readArray(JsonReader::getFloat); } else { throw new IllegalStateException("Unexpected 'embedding' type found when deserializing" + " EmbeddingItem JSON object: " + jsonToken); } } else if ("index".equals(fieldName)) { promptIndex = reader.getInt(); } else { reader.skipChildren(); } } EmbeddingItem embeddingItem = new EmbeddingItem(embedding, promptIndex); embeddingItem.embeddingInFloat = embeddingInFloat; return embeddingItem; }); } /** * Creates an instance of EmbeddingItem class. * * @param embeddingBase64 the embedding value to set. * @param promptIndex the promptIndex value to set. */ private EmbeddingItem(String embeddingBase64, int promptIndex) { this.embeddingBase64 = embeddingBase64; this.promptIndex = promptIndex; } private List<Double> embedding; /* * List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. */ private List<Float> embeddingInFloat; /* * List of embeddings value in base64 format for the input prompt. */ private String embeddingBase64; }
Updated, here are the live test pipeline results: https://dev.azure.com/azure-sdk/internal/_build/results?buildId=4098377&view=ms.vss-test-web.build-test-results-tab
/**
 * Verifies translation with profanity handling enabled: profane terms are to be marked with
 * asterisks while translating into Simplified Chinese.
 */
public void translateWithProfanity() {
    // Build the options step by step rather than as one fluent chain.
    TranslateOptions options = new TranslateOptions();
    options.addTargetLanguage("zh-Hans");
    options.setProfanityAction(ProfanityAction.MARKED);
    options.setProfanityMarker(ProfanityMarker.ASTERISK);

    TranslatedTextItem result = getTranslationClient().translate("shit this is fucking crazy", options);

    assertEquals(1, result.getTranslations().size());
    assertEquals("en", result.getDetectedLanguage().getLanguage());
    assertEquals(1, result.getDetectedLanguage().getConfidence());
}
/**
 * Verifies translation with profanity handling enabled: profane terms must be replaced by
 * asterisk markers ("***") in the Simplified Chinese output.
 */
public void translateWithProfanity() {
    // Build the options step by step rather than as one fluent chain.
    TranslateOptions options = new TranslateOptions();
    options.addTargetLanguage("zh-Hans");
    options.setProfanityAction(ProfanityAction.MARKED);
    options.setProfanityMarker(ProfanityMarker.ASTERISK);

    TranslatedTextItem result = getTranslationClient().translate("shit this is fucking crazy shit fuck", options);

    assertEquals(1, result.getTranslations().size());
    assertEquals("en", result.getDetectedLanguage().getLanguage());
    assertEquals(1, result.getDetectedLanguage().getConfidence());
    // The asterisk marker must actually appear in the translated text.
    assertTrue(result.getTranslations().get(0).getText().contains("***"));
}
class TranslateTests extends TextTranslationClientBase { @Test @LiveOnly public void translateBasic() { TranslatedTextItem response = getTranslationClient().translate("cs", "Hola mundo"); assertEquals(1, response.getTranslations().size()); assertEquals("cs", response.getTranslations().get(0).getTargetLanguage()); assertNotNull(response.getTranslations().get(0).getText()); } @Test @LiveOnly public void translateOneItemWithOptions() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); TranslatedTextItem response = getTranslationClient().translate("Hola mundo", translateOptions); assertEquals(1, response.getTranslations().size()); assertEquals("cs", response.getTranslations().get(0).getTargetLanguage()); assertNotNull(response.getTranslations().get(0).getText()); } @Test @LiveOnly public void translateMultipleItemsWithOptions() { ArrayList<String> content = new ArrayList<>(); content.add("This is a test."); content.add("This is a test sentence two."); content.add("This is another test."); TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); List<TranslatedTextItem> response = getTranslationClient().translate(content, translateOptions); assertEquals(1, response.get(0).getTranslations().size()); assertEquals("cs", response.get(0).getTranslations().get(0).getTargetLanguage()); assertNotNull(response.get(0).getTranslations().get(0).getText()); } @Test @LiveOnly public void translateWithAutoDetect() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); TranslatedTextItem response = getTranslationClient().translate("This is a test.", translateOptions); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getTranslations().size()); assertEquals("cs", response.getTranslations().get(0).getTargetLanguage()); assertNotNull(response.getTranslations().get(0).getText()); } @Test @LiveOnly public void translateWithNoTranslateTag() { 
TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("en") .setSourceLanguage("zh-Hans") .setTextType(TextType.HTML); TranslatedTextItem response = getTranslationClient().translate("<span class=notranslate>今天是怎么回事是</span>非常可怕的", translateOptions); assertEquals(1, response.getTranslations().size()); assertTrue(response.getTranslations().get(0).getText().contains("今天是怎么回事是")); } @Test @LiveOnly public void translateWithDictionaryTag() { TranslateOptions translateOptions = new TranslateOptions() .setSourceLanguage("en") .addTargetLanguage("es"); TranslatedTextItem response = getTranslationClient().translate("The word < mstrans:dictionary translation =\"wordomatic\">wordomatic</mstrans:dictionary> is a dictionary entry.", translateOptions); assertEquals(1, response.getTranslations().size()); assertEquals("es", response.getTranslations().get(0).getTargetLanguage()); assertTrue(response.getTranslations().get(0).getText().contains("wordomatic")); } @Test @LiveOnly public void translateWithTransliteration() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("zh-Hans") .setSourceLanguage("ar") .setSourceLanguageScript("Latn") .setTargetLanguageScript("Latn"); TranslatedTextItem response = getTranslationClient().translate("hudha akhtabar.", translateOptions); assertNotNull(response.getSourceText().getText()); assertEquals("zh-Hans", response.getTranslations().get(0).getTargetLanguage()); assertNotNull(response.getTranslations().get(0).getText()); } @Test public void translateFromLatinToLatinScript() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("ta") .setSourceLanguage("hi") .setSourceLanguageScript("Latn") .setTargetLanguageScript("Latn"); TranslatedTextItem response = getTranslationClient().translate("ap kaise ho", translateOptions); assertNotNull(response.getTranslations().get(0).getTransliteration().getScript()); assertEquals("eppadi irukkiraai?", 
response.getTranslations().get(0).getTransliteration().getText()); } @Test public void translateWithMultipleInputTexts() { ArrayList<String> content = new ArrayList<>(); content.add("This is a test."); content.add("Esto es una prueba."); content.add("Dies ist ein Test."); TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); List<TranslatedTextItem> response = getTranslationClient().translate(content, translateOptions); assertEquals(3, response.size()); assertEquals("en", response.get(0).getDetectedLanguage().getLanguage()); assertEquals("es", response.get(1).getDetectedLanguage().getLanguage()); assertEquals("de", response.get(2).getDetectedLanguage().getLanguage()); assertEquals(1, response.get(0).getDetectedLanguage().getConfidence()); assertEquals(1, response.get(1).getDetectedLanguage().getConfidence()); assertEquals(1, response.get(2).getDetectedLanguage().getConfidence()); assertNotNull(response.get(0).getTranslations().get(0).getText()); assertNotNull(response.get(1).getTranslations().get(0).getText()); assertNotNull(response.get(2).getTranslations().get(0).getText()); } @Test public void translateMultipleTargetLanguages() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs") .addTargetLanguage("es") .addTargetLanguage("de"); TranslatedTextItem response = getTranslationClient().translate("This is a test.", translateOptions); assertEquals(3, response.getTranslations().size()); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); assertNotNull(response.getTranslations().get(0).getText()); assertNotNull(response.getTranslations().get(1).getText()); assertNotNull(response.getTranslations().get(2).getText()); } @Test public void translateDifferentTextTypes() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs") .setTextType(TextType.HTML); TranslatedTextItem response = 
getTranslationClient().translate("<html><body>This <b>is</b> a test.</body></html>", translateOptions); assertEquals(1, response.getTranslations().size()); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); } @Test @Test public void translateWithAlignment() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs") .setIncludeAlignment(true); TranslatedTextItem response = getTranslationClient().translate("It is a beautiful morning", translateOptions); assertEquals(1, response.getTranslations().size()); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); assertNotNull(response.getTranslations().get(0).getAlignment().getProjections()); } @Test public void translateWithIncludeSentenceLength() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("fr") .setIncludeSentenceLength(true); TranslatedTextItem response = getTranslationClient().translate("La réponse se trouve dans la traduction automatique. La meilleure technologie de traduction automatique ne peut pas toujours fournir des traductions adaptées à un site ou des utilisateurs comme un être humain. 
Il suffit de copier et coller un extrait de code n'importe où.", translateOptions); assertEquals("fr", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); assertEquals(1, response.getTranslations().size()); assertEquals(3, response.getTranslations().get(0).getSentenceBoundaries().getSourceSentencesLengths().size()); assertEquals(3, response.getTranslations().get(0).getSentenceBoundaries().getTranslatedSentencesLengths().size()); } @Test public void translateWithCustomEndpoint() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); TranslatedTextItem response = getTranslationClientWithCustomEndpoint().translate("It is a beautiful morning", translateOptions); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); assertEquals(1, response.getTranslations().size()); assertNotNull(response.getTranslations().get(0).getText()); } @Test public void translateWithToken() throws Exception { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); TranslatedTextItem response = getTranslationClientWithToken().translate("This is a test.", translateOptions); assertNotNull(response.getTranslations().get(0).getText()); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); assertEquals(1, response.getTranslations().size()); assertNotNull(response.getTranslations().get(0).getText()); } @Test @PlaybackOnly public void translateWithAad() throws Exception { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); TranslatedTextItem response = getTranslationClientWithAadAuth().translate("This is a test.", translateOptions); assertNotNull(response.getTranslations().get(0).getText()); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, 
response.getDetectedLanguage().getConfidence()); assertEquals(1, response.getTranslations().size()); assertNotNull(response.getTranslations().get(0).getText()); } }
class TranslateTests extends TextTranslationClientBase { @Test @LiveOnly public void translateBasic() { TranslatedTextItem response = getTranslationClient().translate("cs", "Hola mundo"); assertEquals(1, response.getTranslations().size()); assertEquals("cs", response.getTranslations().get(0).getTargetLanguage()); assertNotNull(response.getTranslations().get(0).getText()); } @Test @LiveOnly public void translateOneItemWithOptions() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); TranslatedTextItem response = getTranslationClient().translate("Hola mundo", translateOptions); assertEquals(1, response.getTranslations().size()); assertEquals("cs", response.getTranslations().get(0).getTargetLanguage()); assertNotNull(response.getTranslations().get(0).getText()); } @Test @LiveOnly public void translateMultipleItemsWithOptions() { ArrayList<String> content = new ArrayList<>(); content.add("This is a test."); content.add("This is a test sentence two."); content.add("This is another test."); TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); List<TranslatedTextItem> response = getTranslationClient().translate(content, translateOptions); assertEquals(1, response.get(0).getTranslations().size()); assertEquals("cs", response.get(0).getTranslations().get(0).getTargetLanguage()); assertNotNull(response.get(0).getTranslations().get(0).getText()); } @Test @LiveOnly public void translateWithAutoDetect() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); TranslatedTextItem response = getTranslationClient().translate("This is a test.", translateOptions); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getTranslations().size()); assertEquals("cs", response.getTranslations().get(0).getTargetLanguage()); assertNotNull(response.getTranslations().get(0).getText()); } @Test @LiveOnly public void translateWithNoTranslateTag() { 
TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("en") .setSourceLanguage("zh-Hans") .setTextType(TextType.HTML); TranslatedTextItem response = getTranslationClient().translate("<span class=notranslate>今天是怎么回事是</span>非常可怕的", translateOptions); assertEquals(1, response.getTranslations().size()); assertTrue(response.getTranslations().get(0).getText().contains("今天是怎么回事是")); } @Test @LiveOnly public void translateWithDictionaryTag() { TranslateOptions translateOptions = new TranslateOptions() .setSourceLanguage("en") .addTargetLanguage("es"); TranslatedTextItem response = getTranslationClient().translate("The word < mstrans:dictionary translation =\"wordomatic\">wordomatic</mstrans:dictionary> is a dictionary entry.", translateOptions); assertEquals(1, response.getTranslations().size()); assertEquals("es", response.getTranslations().get(0).getTargetLanguage()); assertTrue(response.getTranslations().get(0).getText().contains("wordomatic")); } @Test @LiveOnly public void translateWithTransliteration() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("zh-Hans") .setSourceLanguage("ar") .setSourceLanguageScript("Latn") .setTargetLanguageScript("Latn"); TranslatedTextItem response = getTranslationClient().translate("hudha akhtabar.", translateOptions); assertNotNull(response.getSourceText().getText()); assertEquals("zh-Hans", response.getTranslations().get(0).getTargetLanguage()); assertNotNull(response.getTranslations().get(0).getText()); } @Test public void translateFromLatinToLatinScript() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("ta") .setSourceLanguage("hi") .setSourceLanguageScript("Latn") .setTargetLanguageScript("Latn"); TranslatedTextItem response = getTranslationClient().translate("ap kaise ho", translateOptions); assertNotNull(response.getTranslations().get(0).getTransliteration().getScript()); assertEquals("eppadi irukkiraai?", 
response.getTranslations().get(0).getTransliteration().getText()); } @Test public void translateWithMultipleInputTexts() { ArrayList<String> content = new ArrayList<>(); content.add("This is a test."); content.add("Esto es una prueba."); content.add("Dies ist ein Test."); TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); List<TranslatedTextItem> response = getTranslationClient().translate(content, translateOptions); assertEquals(3, response.size()); assertEquals("en", response.get(0).getDetectedLanguage().getLanguage()); assertEquals("es", response.get(1).getDetectedLanguage().getLanguage()); assertEquals("de", response.get(2).getDetectedLanguage().getLanguage()); assertEquals(1, response.get(0).getDetectedLanguage().getConfidence()); assertEquals(1, response.get(1).getDetectedLanguage().getConfidence()); assertEquals(1, response.get(2).getDetectedLanguage().getConfidence()); assertNotNull(response.get(0).getTranslations().get(0).getText()); assertNotNull(response.get(1).getTranslations().get(0).getText()); assertNotNull(response.get(2).getTranslations().get(0).getText()); } @Test public void translateMultipleTargetLanguages() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs") .addTargetLanguage("es") .addTargetLanguage("de"); TranslatedTextItem response = getTranslationClient().translate("This is a test.", translateOptions); assertEquals(3, response.getTranslations().size()); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); assertNotNull(response.getTranslations().get(0).getText()); assertNotNull(response.getTranslations().get(1).getText()); assertNotNull(response.getTranslations().get(2).getText()); } @Test public void translateDifferentTextTypes() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs") .setTextType(TextType.HTML); TranslatedTextItem response = 
getTranslationClient().translate("<html><body>This <b>is</b> a test.</body></html>", translateOptions); assertEquals(1, response.getTranslations().size()); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); } @Test @Test public void translateWithAlignment() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs") .setIncludeAlignment(true); TranslatedTextItem response = getTranslationClient().translate("It is a beautiful morning", translateOptions); assertEquals(1, response.getTranslations().size()); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); assertNotNull(response.getTranslations().get(0).getAlignment().getProjections()); } @Test public void translateWithIncludeSentenceLength() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("fr") .setIncludeSentenceLength(true); TranslatedTextItem response = getTranslationClient().translate("La réponse se trouve dans la traduction automatique. La meilleure technologie de traduction automatique ne peut pas toujours fournir des traductions adaptées à un site ou des utilisateurs comme un être humain. 
Il suffit de copier et coller un extrait de code n'importe où.", translateOptions); assertEquals("fr", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); assertEquals(1, response.getTranslations().size()); assertEquals(3, response.getTranslations().get(0).getSentenceBoundaries().getSourceSentencesLengths().size()); assertEquals(3, response.getTranslations().get(0).getSentenceBoundaries().getTranslatedSentencesLengths().size()); } @Test public void translateWithCustomEndpoint() { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); TranslatedTextItem response = getTranslationClientWithCustomEndpoint().translate("It is a beautiful morning", translateOptions); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); assertEquals(1, response.getTranslations().size()); assertNotNull(response.getTranslations().get(0).getText()); } @Test public void translateWithToken() throws Exception { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); TranslatedTextItem response = getTranslationClientWithToken().translate("This is a test.", translateOptions); assertNotNull(response.getTranslations().get(0).getText()); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, response.getDetectedLanguage().getConfidence()); assertEquals(1, response.getTranslations().size()); assertNotNull(response.getTranslations().get(0).getText()); } @Test @PlaybackOnly public void translateWithAad() throws Exception { TranslateOptions translateOptions = new TranslateOptions() .addTargetLanguage("cs"); TranslatedTextItem response = getTranslationClientWithAadAuth().translate("This is a test.", translateOptions); assertNotNull(response.getTranslations().get(0).getText()); assertEquals("en", response.getDetectedLanguage().getLanguage()); assertEquals(1, 
response.getDetectedLanguage().getConfidence()); assertEquals(1, response.getTranslations().size()); assertNotNull(response.getTranslations().get(0).getText()); } }
Not directly related to this issue, but this method did not appear to be implemented before.
/**
 * Serializes the extension's public settings map into its JSON string representation.
 *
 * <p>Fix: the {@code IOException} was previously swallowed silently; it is now logged at warning
 * level (consistent with how this class logs other recoverable failures) before returning null.
 *
 * @return the public settings as a JSON string, or {@code null} if serialization fails.
 */
public String publicSettingsAsJsonString() {
    try {
        return ((ComputeManagementClientImpl) parent().manager().serviceClient())
            .getSerializerAdapter().serialize(this.publicSettings, SerializerEncoding.JSON);
    } catch (IOException e) {
        // Best-effort: callers treat null as "settings unavailable", but record the failure.
        logger.atWarning().log("Serialization failed for publicSettings.", e);
        return null;
    }
}
}
/**
 * Serializes the extension's public settings map into its JSON string representation.
 *
 * @return the public settings as a JSON string, or {@code null} when serialization fails
 *     (the failure is logged at warning level).
 */
public String publicSettingsAsJsonString() {
    String json;
    try {
        json = ((ComputeManagementClientImpl) parent().manager().serviceClient())
            .getSerializerAdapter()
            .serialize(this.publicSettings, SerializerEncoding.JSON);
    } catch (IOException e) {
        // Best-effort: callers treat null as "settings unavailable", but record the failure.
        logger.atWarning().log("Serialization failed for publicSettings.", e);
        json = null;
    }
    return json;
}
class VirtualMachineExtensionImpl extends ExternalChildResourceImpl< VirtualMachineExtension, VirtualMachineExtensionInner, VirtualMachineImpl, VirtualMachine> implements VirtualMachineExtension, VirtualMachineExtension.Definition<VirtualMachine.DefinitionStages.WithCreate>, VirtualMachineExtension.UpdateDefinition<VirtualMachine.Update>, VirtualMachineExtension.Update { private final ClientLogger logger = new ClientLogger(VirtualMachineExtensionImpl.class); private final VirtualMachineExtensionsClient client; private Map<String, Object> publicSettings; private Map<String, Object> protectedSettings; VirtualMachineExtensionImpl( String name, VirtualMachineImpl parent, VirtualMachineExtensionInner inner, VirtualMachineExtensionsClient client) { super(name, parent, inner); this.client = client; initializeSettings(); } protected static VirtualMachineExtensionImpl newVirtualMachineExtension( String name, VirtualMachineImpl parent, VirtualMachineExtensionsClient client) { VirtualMachineExtensionInner inner = new VirtualMachineExtensionInner(); inner.withLocation(parent.regionName()); VirtualMachineExtensionImpl extension = new VirtualMachineExtensionImpl(name, parent, inner, client); return extension; } @Override public String id() { return this.innerModel().id(); } @Override public String publisherName() { return this.innerModel().publisher(); } @Override public String typeName() { return this.innerModel().typePropertiesType(); } @Override public String versionName() { return this.innerModel().typeHandlerVersion(); } @Override public boolean autoUpgradeMinorVersionEnabled() { return this.innerModel().autoUpgradeMinorVersion(); } @Override public Map<String, Object> publicSettings() { return Collections.unmodifiableMap(this.publicSettings); } @Override @Override public VirtualMachineExtensionInstanceView getInstanceView() { return getInstanceViewAsync().block(); } @Override public Mono<VirtualMachineExtensionInstanceView> getInstanceViewAsync() { return this .client 
.getWithResponseAsync(this.parent().resourceGroupName(), this.parent().name(), this.name(), "instanceView") .flatMap(inner -> Mono.justOrEmpty(inner.getValue().instanceView())); } @Override public Map<String, String> tags() { Map<String, String> tags = this.innerModel().tags(); if (tags == null) { tags = new TreeMap<>(); } return Collections.unmodifiableMap(tags); } @Override public String provisioningState() { return this.innerModel().provisioningState(); } @Override public VirtualMachineExtensionImpl withMinorVersionAutoUpgrade() { this.innerModel().withAutoUpgradeMinorVersion(true); return this; } @Override public VirtualMachineExtensionImpl withoutMinorVersionAutoUpgrade() { this.innerModel().withAutoUpgradeMinorVersion(false); return this; } @Override public VirtualMachineExtensionImpl withImage(VirtualMachineExtensionImage image) { this .innerModel() .withPublisher(image.publisherName()) .withTypePropertiesType(image.typeName()) .withTypeHandlerVersion(image.versionName()); return this; } @Override public VirtualMachineExtensionImpl withPublisher(String extensionImagePublisherName) { this.innerModel().withPublisher(extensionImagePublisherName); return this; } @Override public VirtualMachineExtensionImpl withPublicSetting(String key, Object value) { this.publicSettings.put(key, value); return this; } @Override public VirtualMachineExtensionImpl withProtectedSetting(String key, Object value) { this.protectedSettings.put(key, value); return this; } @Override public VirtualMachineExtensionImpl withPublicSettings(HashMap<String, Object> settings) { this.publicSettings.clear(); this.publicSettings.putAll(settings); return this; } @Override public VirtualMachineExtensionImpl withProtectedSettings(HashMap<String, Object> settings) { this.protectedSettings.clear(); this.protectedSettings.putAll(settings); return this; } @Override public VirtualMachineExtensionImpl withType(String extensionImageTypeName) { 
this.innerModel().withTypePropertiesType(extensionImageTypeName); return this; } @Override public VirtualMachineExtensionImpl withVersion(String extensionImageVersionName) { this.innerModel().withTypeHandlerVersion(extensionImageVersionName); return this; } @Override public final VirtualMachineExtensionImpl withTags(Map<String, String> tags) { this.innerModel().withTags(new HashMap<>(tags)); return this; } @Override public final VirtualMachineExtensionImpl withTag(String key, String value) { if (this.innerModel().tags() == null) { this.innerModel().withTags(new HashMap<>()); } this.innerModel().tags().put(key, value); return this; } @Override public final VirtualMachineExtensionImpl withoutTag(String key) { if (this.innerModel().tags() != null) { this.innerModel().tags().remove(key); } return this; } @Override public VirtualMachineImpl attach() { this.nullifySettingsIfEmpty(); return this.parent().withExtension(this); } @Override protected Mono<VirtualMachineExtensionInner> getInnerAsync() { String name; if (this.isReference()) { name = ResourceUtils.nameFromResourceId(this.innerModel().id()); } else { name = this.innerModel().name(); } return this.client.getAsync(this.parent().resourceGroupName(), this.parent().name(), name); } @Override public Mono<VirtualMachineExtension> createResourceAsync() { final VirtualMachineExtensionImpl self = this; return this .client .createOrUpdateAsync( this.parent().resourceGroupName(), this.parent().name(), this.name(), this.innerModel()) .map( inner -> { self.setInner(inner); self.initializeSettings(); return self; }); } @Override @SuppressWarnings("unchecked") public Mono<VirtualMachineExtension> updateResourceAsync() { this.nullifySettingsIfEmpty(); if (this.isReference()) { String extensionName = ResourceUtils.nameFromResourceId(this.innerModel().id()); return this .client .getAsync(this.parent().resourceGroupName(), this.parent().name(), extensionName) .flatMap( resource -> { innerModel() .withPublisher(resource.publisher()) 
.withTypePropertiesType(resource.typePropertiesType()) .withTypeHandlerVersion(resource.typeHandlerVersion()); if (innerModel().autoUpgradeMinorVersion() == null) { innerModel().withAutoUpgradeMinorVersion(resource.autoUpgradeMinorVersion()); } LinkedHashMap<String, Object> publicSettings = (LinkedHashMap<String, Object>) resource.settings(); if (publicSettings != null && publicSettings.size() > 0) { LinkedHashMap<String, Object> innerPublicSettings = (LinkedHashMap<String, Object>) innerModel().settings(); if (innerPublicSettings == null) { innerModel().withSettings(new LinkedHashMap<String, Object>()); innerPublicSettings = (LinkedHashMap<String, Object>) innerModel().settings(); } for (Map.Entry<String, Object> entry : publicSettings.entrySet()) { if (!innerPublicSettings.containsKey(entry.getKey())) { innerPublicSettings.put(entry.getKey(), entry.getValue()); } } } return createResourceAsync(); }); } else { return this.createResourceAsync(); } } @Override public Mono<Void> deleteResourceAsync() { return this.client.deleteAsync(this.parent().resourceGroupName(), this.parent().name(), this.name()); } /** * @return true if this is just a reference to the extension. * <p>An extension will present as a reference when the parent virtual machine was fetched using VM list, a GET * on a specific VM will return fully expanded extension details. 
*/ public boolean isReference() { return this.innerModel().name() == null; } private void nullifySettingsIfEmpty() { if (this.publicSettings.size() == 0) { this.innerModel().withSettings(null); } if (this.protectedSettings.size() == 0) { this.innerModel().withProtectedSettings(null); } } private void initializeSettings() { if (this.innerModel().settings() == null) { this.publicSettings = new LinkedHashMap<>(); this.innerModel().withSettings(this.publicSettings); } else { this.publicSettings = loadSettings(this.innerModel().settings()); } if (this.innerModel().protectedSettings() == null) { this.protectedSettings = new LinkedHashMap<>(); this.innerModel().withProtectedSettings(this.protectedSettings); } else { this.protectedSettings = loadSettings(this.innerModel().protectedSettings()); } } @SuppressWarnings("unchecked") private Map<String, Object> loadSettings(Object settings) { if (settings instanceof String) { try (JsonReader jsonReader = JsonProviders.createReader((String) settings)) { return jsonReader.readMap(JsonReader::readUntyped); } catch (IOException e) { logger.atWarning().log("[VirtualMachineExtensionImpl] invalid String setting: {}", settings); return new LinkedHashMap<>(); } } else if (settings instanceof Map) { return (Map<String, Object>) settings; } else { logger.atWarning().log("[VirtualMachineExtensionImpl] unrecognized setting type: {}, value: {}", settings.getClass(), settings); return new LinkedHashMap<>(); } } }
class VirtualMachineExtensionImpl extends ExternalChildResourceImpl< VirtualMachineExtension, VirtualMachineExtensionInner, VirtualMachineImpl, VirtualMachine> implements VirtualMachineExtension, VirtualMachineExtension.Definition<VirtualMachine.DefinitionStages.WithCreate>, VirtualMachineExtension.UpdateDefinition<VirtualMachine.Update>, VirtualMachineExtension.Update { private final ClientLogger logger = new ClientLogger(VirtualMachineExtensionImpl.class); private final VirtualMachineExtensionsClient client; private Map<String, Object> publicSettings; private Map<String, Object> protectedSettings; VirtualMachineExtensionImpl( String name, VirtualMachineImpl parent, VirtualMachineExtensionInner inner, VirtualMachineExtensionsClient client) { super(name, parent, inner); this.client = client; initializeSettings(); } protected static VirtualMachineExtensionImpl newVirtualMachineExtension( String name, VirtualMachineImpl parent, VirtualMachineExtensionsClient client) { VirtualMachineExtensionInner inner = new VirtualMachineExtensionInner(); inner.withLocation(parent.regionName()); VirtualMachineExtensionImpl extension = new VirtualMachineExtensionImpl(name, parent, inner, client); return extension; } @Override public String id() { return this.innerModel().id(); } @Override public String publisherName() { return this.innerModel().publisher(); } @Override public String typeName() { return this.innerModel().typePropertiesType(); } @Override public String versionName() { return this.innerModel().typeHandlerVersion(); } @Override public boolean autoUpgradeMinorVersionEnabled() { return this.innerModel().autoUpgradeMinorVersion(); } @Override public Map<String, Object> publicSettings() { return Collections.unmodifiableMap(this.publicSettings); } @Override @Override public VirtualMachineExtensionInstanceView getInstanceView() { return getInstanceViewAsync().block(); } @Override public Mono<VirtualMachineExtensionInstanceView> getInstanceViewAsync() { return this .client 
.getWithResponseAsync(this.parent().resourceGroupName(), this.parent().name(), this.name(), "instanceView") .flatMap(inner -> Mono.justOrEmpty(inner.getValue().instanceView())); } @Override public Map<String, String> tags() { Map<String, String> tags = this.innerModel().tags(); if (tags == null) { tags = new TreeMap<>(); } return Collections.unmodifiableMap(tags); } @Override public String provisioningState() { return this.innerModel().provisioningState(); } @Override public VirtualMachineExtensionImpl withMinorVersionAutoUpgrade() { this.innerModel().withAutoUpgradeMinorVersion(true); return this; } @Override public VirtualMachineExtensionImpl withoutMinorVersionAutoUpgrade() { this.innerModel().withAutoUpgradeMinorVersion(false); return this; } @Override public VirtualMachineExtensionImpl withImage(VirtualMachineExtensionImage image) { this .innerModel() .withPublisher(image.publisherName()) .withTypePropertiesType(image.typeName()) .withTypeHandlerVersion(image.versionName()); return this; } @Override public VirtualMachineExtensionImpl withPublisher(String extensionImagePublisherName) { this.innerModel().withPublisher(extensionImagePublisherName); return this; } @Override public VirtualMachineExtensionImpl withPublicSetting(String key, Object value) { this.publicSettings.put(key, value); return this; } @Override public VirtualMachineExtensionImpl withProtectedSetting(String key, Object value) { this.protectedSettings.put(key, value); return this; } @Override public VirtualMachineExtensionImpl withPublicSettings(HashMap<String, Object> settings) { this.publicSettings.clear(); this.publicSettings.putAll(settings); return this; } @Override public VirtualMachineExtensionImpl withProtectedSettings(HashMap<String, Object> settings) { this.protectedSettings.clear(); this.protectedSettings.putAll(settings); return this; } @Override public VirtualMachineExtensionImpl withType(String extensionImageTypeName) { 
this.innerModel().withTypePropertiesType(extensionImageTypeName); return this; } @Override public VirtualMachineExtensionImpl withVersion(String extensionImageVersionName) { this.innerModel().withTypeHandlerVersion(extensionImageVersionName); return this; } @Override public final VirtualMachineExtensionImpl withTags(Map<String, String> tags) { this.innerModel().withTags(new HashMap<>(tags)); return this; } @Override public final VirtualMachineExtensionImpl withTag(String key, String value) { if (this.innerModel().tags() == null) { this.innerModel().withTags(new HashMap<>()); } this.innerModel().tags().put(key, value); return this; } @Override public final VirtualMachineExtensionImpl withoutTag(String key) { if (this.innerModel().tags() != null) { this.innerModel().tags().remove(key); } return this; } @Override public VirtualMachineImpl attach() { this.nullifySettingsIfEmpty(); return this.parent().withExtension(this); } @Override protected Mono<VirtualMachineExtensionInner> getInnerAsync() { String name; if (this.isReference()) { name = ResourceUtils.nameFromResourceId(this.innerModel().id()); } else { name = this.innerModel().name(); } return this.client.getAsync(this.parent().resourceGroupName(), this.parent().name(), name); } @Override public Mono<VirtualMachineExtension> createResourceAsync() { final VirtualMachineExtensionImpl self = this; return this .client .createOrUpdateAsync( this.parent().resourceGroupName(), this.parent().name(), this.name(), this.innerModel()) .map( inner -> { self.setInner(inner); self.initializeSettings(); return self; }); } @Override @SuppressWarnings("unchecked") public Mono<VirtualMachineExtension> updateResourceAsync() { this.nullifySettingsIfEmpty(); if (this.isReference()) { String extensionName = ResourceUtils.nameFromResourceId(this.innerModel().id()); return this .client .getAsync(this.parent().resourceGroupName(), this.parent().name(), extensionName) .flatMap( resource -> { innerModel() .withPublisher(resource.publisher()) 
.withTypePropertiesType(resource.typePropertiesType()) .withTypeHandlerVersion(resource.typeHandlerVersion()); if (innerModel().autoUpgradeMinorVersion() == null) { innerModel().withAutoUpgradeMinorVersion(resource.autoUpgradeMinorVersion()); } LinkedHashMap<String, Object> publicSettings = (LinkedHashMap<String, Object>) resource.settings(); if (publicSettings != null && publicSettings.size() > 0) { LinkedHashMap<String, Object> innerPublicSettings = (LinkedHashMap<String, Object>) innerModel().settings(); if (innerPublicSettings == null) { innerModel().withSettings(new LinkedHashMap<String, Object>()); innerPublicSettings = (LinkedHashMap<String, Object>) innerModel().settings(); } for (Map.Entry<String, Object> entry : publicSettings.entrySet()) { if (!innerPublicSettings.containsKey(entry.getKey())) { innerPublicSettings.put(entry.getKey(), entry.getValue()); } } } return createResourceAsync(); }); } else { return this.createResourceAsync(); } } @Override public Mono<Void> deleteResourceAsync() { return this.client.deleteAsync(this.parent().resourceGroupName(), this.parent().name(), this.name()); } /** * @return true if this is just a reference to the extension. * <p>An extension will present as a reference when the parent virtual machine was fetched using VM list, a GET * on a specific VM will return fully expanded extension details. 
*/ public boolean isReference() { return this.innerModel().name() == null; } private void nullifySettingsIfEmpty() { if (this.publicSettings.size() == 0) { this.innerModel().withSettings(null); } if (this.protectedSettings.size() == 0) { this.innerModel().withProtectedSettings(null); } } private void initializeSettings() { if (this.innerModel().settings() == null) { this.publicSettings = new LinkedHashMap<>(); this.innerModel().withSettings(this.publicSettings); } else { this.publicSettings = loadSettings(this.innerModel().settings()); } if (this.innerModel().protectedSettings() == null) { this.protectedSettings = new LinkedHashMap<>(); this.innerModel().withProtectedSettings(this.protectedSettings); } else { this.protectedSettings = loadSettings(this.innerModel().protectedSettings()); } } @SuppressWarnings("unchecked") private Map<String, Object> loadSettings(Object settings) { if (settings instanceof String) { try (JsonReader jsonReader = JsonProviders.createReader((String) settings)) { return jsonReader.readMap(JsonReader::readUntyped); } catch (IOException e) { logger.atWarning().log("[VirtualMachineExtensionImpl] invalid String setting: {}", settings); return new LinkedHashMap<>(); } } else if (settings instanceof Map) { return (Map<String, Object>) settings; } else { logger.atWarning().log("[VirtualMachineExtensionImpl] unrecognized setting type: {}, value: {}", settings.getClass(), settings); return new LinkedHashMap<>(); } } }
Add a new test-proxy sanitizer for `StorageAccountKey` so that storage account keys are redacted from recorded test sessions.
/**
 * Registers the test-proxy sanitizers that scrub subscription identifiers and secret material
 * (passwords, keys, connection strings, SAS tokens) from recorded test sessions, then appends any
 * sanitizers contributed via {@code this.sanitizers} and hands the combined list to the proxy.
 */
private void addSanitizers() {
    List<TestProxySanitizer> sanitizers = new ArrayList<>(Arrays.asList(
        // URL sanitizers: replace the subscription id path segment (raw and URL-encoded forms)
        // with an all-zero UUID.
        new TestProxySanitizer(SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.URL),
        new TestProxySanitizer("(?<=%2Fsubscriptions%2F)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
        // Header sanitizer: record Retry-After as "0".
        new TestProxySanitizer("Retry-After", null, "0", TestProxySanitizerType.HEADER),
        // Body sanitizer: replace subscription ids embedded inside resource ids.
        new TestProxySanitizer("$..id", SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.BODY_KEY),
        // Body sanitizers: redact secrets, passwords, keys and connection strings by JSONPath.
        new TestProxySanitizer("$..secretText", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..keys[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..adminPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..Password", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..accessSAS", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$.properties.osProfile.customData", null, REDACTED_VALUE,
            TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..administratorLoginPassword", null, REDACTED_VALUE,
            TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..hubDatabasePassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..aliasPrimaryConnectionString", null, REDACTED_VALUE,
            TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..aliasSecondaryConnectionString", null, REDACTED_VALUE,
            TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..primaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..secondaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..primaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..secondaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..primaryReadonlyMasterKey", null, REDACTED_VALUE,
            TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..secondaryReadonlyMasterKey", null, REDACTED_VALUE,
            TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..passwords[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..secret", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$.properties.siteConfig.machineKey.decryptionKey", null, REDACTED_VALUE,
            TestProxySanitizerType.BODY_KEY),
        // Regex sanitizer: redact only the AccountKey component of storage connection strings,
        // using the named capture group for the replacement.
        new TestProxySanitizer("(?:AccountKey=)(?<accountKey>.*?)(?:;)", REDACTED_VALUE,
            TestProxySanitizerType.BODY_REGEX).setGroupForReplace("accountKey"),
        new TestProxySanitizer("$.properties.WEBSITE_AUTH_ENCRYPTION_KEY", null, REDACTED_VALUE,
            TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$.properties.DOCKER_REGISTRY_SERVER_PASSWORD", null, REDACTED_VALUE,
            TestProxySanitizerType.BODY_KEY),
        // Redacts the storage account key carried in VM extension protected settings.
        new TestProxySanitizer("$.properties.protectedSettings.storageAccountKey", null, REDACTED_VALUE,
            TestProxySanitizerType.BODY_KEY)
    ));
    // Include sanitizers contributed by subclasses/tests before registering with the proxy.
    sanitizers.addAll(this.sanitizers);
    interceptorManager.addSanitizers(sanitizers);
}
new TestProxySanitizer("$.properties.protectedSettings.storageAccountKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY)
/**
 * Registers the test-proxy sanitizers that scrub subscription identifiers and secret material
 * (passwords, keys, connection strings, SAS tokens) from recorded test sessions, then appends any
 * sanitizers contributed via {@code this.sanitizers} and hands the combined list to the proxy.
 */
private void addSanitizers() {
    List<TestProxySanitizer> sanitizers = new ArrayList<>(Arrays.asList(
        // URL sanitizers: replace the subscription id path segment (raw and URL-encoded forms)
        // with an all-zero UUID.
        new TestProxySanitizer(SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.URL),
        new TestProxySanitizer("(?<=%2Fsubscriptions%2F)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
        // Header sanitizer: record Retry-After as "0".
        new TestProxySanitizer("Retry-After", null, "0", TestProxySanitizerType.HEADER),
        // Body sanitizer: replace subscription ids embedded inside resource ids.
        new TestProxySanitizer("$..id", SUBSCRIPTION_ID_REGEX, ZERO_UUID, TestProxySanitizerType.BODY_KEY),
        // Body sanitizers: redact secrets, passwords, keys and connection strings by JSONPath.
        new TestProxySanitizer("$..secretText", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..keys[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..adminPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..Password", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..accessSAS", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$.properties.osProfile.customData", null, REDACTED_VALUE,
            TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..administratorLoginPassword", null, REDACTED_VALUE,
            TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..hubDatabasePassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..aliasPrimaryConnectionString", null, REDACTED_VALUE,
            TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..aliasSecondaryConnectionString", null, REDACTED_VALUE,
            TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..primaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..secondaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..primaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..secondaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..primaryReadonlyMasterKey", null, REDACTED_VALUE,
            TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..secondaryReadonlyMasterKey", null, REDACTED_VALUE,
            TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..passwords[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$..secret", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$.properties.siteConfig.machineKey.decryptionKey", null, REDACTED_VALUE,
            TestProxySanitizerType.BODY_KEY),
        // Regex sanitizer: redact only the AccountKey component of storage connection strings,
        // using the named capture group for the replacement.
        new TestProxySanitizer("(?:AccountKey=)(?<accountKey>.*?)(?:;)", REDACTED_VALUE,
            TestProxySanitizerType.BODY_REGEX).setGroupForReplace("accountKey"),
        new TestProxySanitizer("$.properties.WEBSITE_AUTH_ENCRYPTION_KEY", null, REDACTED_VALUE,
            TestProxySanitizerType.BODY_KEY),
        new TestProxySanitizer("$.properties.DOCKER_REGISTRY_SERVER_PASSWORD", null, REDACTED_VALUE,
            TestProxySanitizerType.BODY_KEY),
        // Redacts the storage account key carried in VM extension protected settings.
        new TestProxySanitizer("$.properties.protectedSettings.storageAccountKey", null, REDACTED_VALUE,
            TestProxySanitizerType.BODY_KEY)
    ));
    // Include sanitizers contributed by subclasses/tests before registering with the proxy.
    sanitizers.addAll(this.sanitizers);
    interceptorManager.addSanitizers(sanitizers);
}
/**
 * Builds a manager instance reflectively via its {@code (HttpPipeline, AzureProfile)} constructor.
 *
 * @param manager the class of the manager
 * @param httpPipeline the http pipeline
 * @param profile the azure profile
 * @param <T> the type of the manager
 * @return the manager instance
 * @throws RuntimeException when the constructor cannot be found, made accessible, or invoked.
 */
protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) {
    try {
        // Looks up the (possibly non-public) constructor matching the runtime argument types.
        Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass());
        setAccessible(constructor);
        return constructor.newInstance(httpPipeline, profile);
    } catch (ReflectiveOperationException ex) {
        // Wrap any reflection failure as an unchecked exception, logged through the client logger.
        throw LOGGER.logExceptionAsError(new RuntimeException(ex));
    }
}
/**
 * Builds a manager instance reflectively via its {@code (HttpPipeline, AzureProfile)} constructor.
 *
 * @param manager the class of the manager
 * @param httpPipeline the http pipeline
 * @param profile the azure profile
 * @param <T> the type of the manager
 * @return the manager instance
 * @throws RuntimeException when the constructor cannot be found, made accessible, or invoked.
 */
protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) {
    try {
        // Looks up the (possibly non-public) constructor matching the runtime argument types.
        Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass());
        setAccessible(constructor);
        return constructor.newInstance(httpPipeline, profile);
    } catch (ReflectiveOperationException ex) {
        // Wrap any reflection failure as an unchecked exception, logged through the client logger.
        throw LOGGER.logExceptionAsError(new RuntimeException(ex));
    }
}
I am not sure that using a different message string here than the one we use above is worthwhile, since we already require HTTPS in both places — should we unify the two messages?
// Asynchronously acquires a PoP token (when a nonce has been received) and stamps the
// Authorization header on the outgoing request. Requests over plain HTTP are rejected.
private Mono<Void> setAuthorizationHeaderHelper(HttpPipelineCallContext context, boolean checkToForceFetchToken) {
    if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) {
        throw LOGGER.logExceptionAsError(new RuntimeException(
            "Proof of possession token authentication is not permitted for non TLS-protected (HTTPS) endpoints."));
    }
    PopTokenRequestContext requestContext = new PopTokenRequestContext()
        .setScopes(this.scopes)
        .setParentRequestId(context.getHttpRequest().getHeaders().getValue(HttpHeaderName.X_MS_CLIENT_REQUEST_ID))
        .setProofOfPossessionNonce(popNonce)
        .setRequest(context.getHttpRequest());
    if (CoreUtils.isNullOrEmpty(popNonce)) {
        // No PoP nonce yet: nothing can be attached until a challenge supplies one.
        return Mono.empty();
    }
    return this.cache.getToken(requestContext, checkToForceFetchToken)
        .flatMap(token -> {
            setAuthorizationHeader(context.getHttpRequest().getHeaders(), token.getToken());
            return Mono.empty();
        });
}
"Proof of possession token authentication is not permitted for non TLS-protected (HTTPS) endpoints."));
// Asynchronously acquires a PoP token (when a nonce has been received) and stamps the
// Authorization header on the outgoing request. Requests over plain HTTP are rejected.
private Mono<Void> setAuthorizationHeaderHelper(HttpPipelineCallContext context, boolean checkToForceFetchToken) {
    if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) {
        throw LOGGER.logExceptionAsError(new RuntimeException(
            "Proof of possession token authentication is not permitted for non TLS-protected (HTTPS) endpoints."));
    }
    PopTokenRequestContext requestContext = new PopTokenRequestContext()
        .addScopes(this.scopes.get(0))
        .setProofOfPossessionNonce(popNonce)
        .setResourceRequestUrl(context.getHttpRequest().getUrl())
        .setResourceRequestMethod(context.getHttpRequest().getHttpMethod());
    if (CoreUtils.isNullOrEmpty(popNonce)) {
        // No PoP nonce yet: nothing can be attached until a challenge supplies one.
        return Mono.empty();
    }
    return this.cache.getToken(requestContext, checkToForceFetchToken)
        .flatMap(token -> {
            setAuthorizationHeader(context.getHttpRequest().getHeaders(), token.getToken());
            return Mono.empty();
        });
}
/**
 * An {@link HttpPipelinePolicy} that authenticates requests with Proof of Possession (PoP) tokens.
 * It tracks the PoP nonce advertised by the service in {@code WWW-Authenticate} headers and
 * replays 401-challenged requests once a token bound to the nonce has been acquired.
 */
class PopTokenAuthenticationPolicy implements HttpPipelinePolicy {
    private static final ClientLogger LOGGER = new ClientLogger(PopTokenAuthenticationPolicy.class);
    private final List<String> scopes = new ArrayList<>();
    private final AccessTokenCache cache;
    // Most recent PoP nonce extracted from a WWW-Authenticate challenge; null/empty until seen.
    private String popNonce;

    /**
     * Creates a new instance of the PopTokenAuthenticationPolicy.
     * @param credential The credential to use for authentication.
     * @param scopes The scopes required for the token.
     */
    public PopTokenAuthenticationPolicy(SupportsProofOfPossession credential, String... scopes) {
        Objects.requireNonNull(credential);
        this.scopes.clear();
        this.scopes.addAll(Arrays.asList(scopes));
        this.cache = new AccessTokenCache(credential);
    }

    /**
     * Authorizes the request.
     * @param context The context of the request.
     * @return A {@link Mono} containing {@link Void}.
     */
    public Mono<Void> authorizeRequest(HttpPipelineCallContext context) {
        return this.scopes == null ? Mono.empty() : this.setAuthorizationHeaderHelper(context, false);
    }

    /**
     * Authorizes the request synchronously.
     * @param context The context of the request.
     */
    public void authorizeRequestSync(HttpPipelineCallContext context) {
        this.setAuthorizationHeaderHelperSync(context, false);
    }

    /**
     * Authorizes the request on challenge.
     * @param context The context of the request.
     * @param response The response of the request.
     * @return A {@link Mono} containing a {@link Boolean} indicating if the request was authorized.
     */
    public Mono<Boolean> authorizeRequestOnChallenge(HttpPipelineCallContext context, HttpResponse response) {
        popNonce = AuthorizationChallengeParser.getChallengeParameterFromResponse(response, "PoP", "nonce");
        if (CoreUtils.isNullOrEmpty(popNonce)) {
            return Mono.just(false);
        }
        return this.scopes == null
            ? Mono.just(false)
            : this.setAuthorizationHeaderHelper(context, true).flatMap((ignored) -> Mono.just(true));
    }

    /**
     * Authorizes the request on challenge synchronously.
     * @param context The context of the request.
     * @param response The response of the request.
     * @return A {@link Boolean} indicating if the request was authorized.
     */
    public boolean authorizeRequestOnChallengeSync(HttpPipelineCallContext context, HttpResponse response) {
        popNonce = AuthorizationChallengeParser.getChallengeParameterFromResponse(response, "PoP", "nonce");
        if (CoreUtils.isNullOrEmpty(popNonce)) {
            return false;
        }
        if (this.scopes == null) {
            return false;
        } else {
            this.setAuthorizationHeaderHelperSync(context, true);
            return true;
        }
    }

    /**
     * Processes the request.
     * @param context The context of the request.
     * @param next The next policy in the pipeline.
     * @return A {@link Mono} containing the {@link HttpResponse}.
     */
    @Override
    public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
        if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) {
            // FIX: use the same error message as the sync path and the header helpers, instead of
            // the generic "token credentials require a URL using the HTTPS protocol scheme".
            return Mono.error(new RuntimeException(
                "Proof of possession token authentication is not permitted for non TLS-protected (HTTPS) endpoints."));
        }
        HttpPipelineNextPolicy nextPolicy = next.clone();
        return authorizeRequest(context).then(Mono.defer(next::process)).flatMap(httpResponse -> {
            String authHeader = httpResponse.getHeaderValue(HttpHeaderName.WWW_AUTHENTICATE);
            if (httpResponse.getStatusCode() == 401 && authHeader != null) {
                return authorizeRequestOnChallenge(context, httpResponse).flatMap(authorized -> {
                    if (authorized) {
                        // Challenge satisfied: close the 401 response and retry with the PoP token.
                        httpResponse.close();
                        return nextPolicy.process();
                    } else {
                        return Mono.just(httpResponse);
                    }
                });
            } else if (authHeader != null) {
                // Capture a rotated nonce advertised on a non-401 response for later requests.
                popNonce
                    = AuthorizationChallengeParser.getChallengeParameterFromResponse(httpResponse, "PoP", "nonce");
            }
            return Mono.just(httpResponse);
        });
    }

    /**
     * Processes the request synchronously.
     * @param context The context of the request.
     * @param next The next policy in the pipeline.
     * @return The {@link HttpResponse}.
     */
    public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
        if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) {
            // FIX: unified with the message used by the async path and the header helpers.
            throw LOGGER.logExceptionAsError(new RuntimeException(
                "Proof of possession token authentication is not permitted for non TLS-protected (HTTPS) endpoints."));
        } else {
            HttpPipelineNextSyncPolicy nextPolicy = next.clone();
            this.authorizeRequestSync(context);
            HttpResponse httpResponse = next.processSync();
            String authHeader = httpResponse.getHeaderValue(HttpHeaderName.WWW_AUTHENTICATE);
            if (httpResponse.getStatusCode() == 401 && authHeader != null) {
                if (this.authorizeRequestOnChallengeSync(context, httpResponse)) {
                    httpResponse.close();
                    return nextPolicy.processSync();
                } else {
                    return httpResponse;
                }
            } else if (authHeader != null) {
                popNonce
                    = AuthorizationChallengeParser.getChallengeParameterFromResponse(httpResponse, "PoP", "nonce");
                return httpResponse;
            } else {
                return httpResponse;
            }
        }
    }

    // Synchronously acquires a PoP token and stamps the Authorization header on the request.
    private void setAuthorizationHeaderHelperSync(HttpPipelineCallContext context, boolean checkToForceFetchToken) {
        if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) {
            throw LOGGER.logExceptionAsError(new RuntimeException(
                "Proof of possession token authentication is not permitted for non TLS-protected (HTTPS) endpoints."));
        }
        PopTokenRequestContext popTokenRequestContext = new PopTokenRequestContext().setScopes(this.scopes)
            .setParentRequestId(context.getHttpRequest().getHeaders().getValue(HttpHeaderName.X_MS_CLIENT_REQUEST_ID))
            .setProofOfPossessionNonce(popNonce);
        AccessToken token = this.cache.getTokenSync(popTokenRequestContext, checkToForceFetchToken);
        setAuthorizationHeader(context.getHttpRequest().getHeaders(), token.getToken());
    }

    private static void setAuthorizationHeader(HttpHeaders headers, String token) {
        headers.set(HttpHeaderName.AUTHORIZATION, "Pop " + token);
    }
}
class PopTokenAuthenticationPolicy implements HttpPipelinePolicy { private static final ClientLogger LOGGER = new ClientLogger(PopTokenAuthenticationPolicy.class); private final List<String> scopes = new ArrayList<>(); private final AccessTokenCache cache; private String popNonce; /** * Creates a new instance of the PopTokenAuthenticationPolicy. * @param credential The credential to use for authentication. * @param scopes The scopes required for the token. */ public PopTokenAuthenticationPolicy(TokenCredential credential, String... scopes) { Objects.requireNonNull(credential); this.scopes.clear(); this.scopes.addAll(Arrays.asList(scopes)); this.cache = new AccessTokenCache(credential); } /** * Authorizes the request. * @param context The context of the request. * @return A {@link Mono} containing {@link Void} . */ public Mono<Void> authorizeRequest(HttpPipelineCallContext context) { return this.scopes == null ? Mono.empty() : this.setAuthorizationHeaderHelper(context, false); } /** * Authorizes the request synchronously. * @param context The context of the request. */ public void authorizeRequestSync(HttpPipelineCallContext context) { this.setAuthorizationHeaderHelperSync(context, false); } /** * Authorizes the request on challenge. * @param context The context of the request. * @param response The response of the request. * @return A {@link Mono} containing a {@link Boolean} indicating if the request was authorized. */ public Mono<Boolean> authorizeRequestOnChallenge(HttpPipelineCallContext context, HttpResponse response) { popNonce = getChallengeParameterFromResponse(response, "PoP", "nonce"); if (CoreUtils.isNullOrEmpty(popNonce)) { return Mono.just(false); } return this.scopes == null ? Mono.just(false) : this.setAuthorizationHeaderHelper(context, true).flatMap((ignored) -> Mono.just(true)); } /** * Authorizes the request on challenge synchronously. * @param context The context of the request. * @param response The response of the request. 
* @return A {@link Boolean} indicating if the request was authorized. */ public boolean authorizeRequestOnChallengeSync(HttpPipelineCallContext context, HttpResponse response) { popNonce = AuthorizationChallengeParser.getChallengeParameterFromResponse(response, "PoP", "nonce"); if (CoreUtils.isNullOrEmpty(popNonce)) { return false; } if (this.scopes == null) { return false; } else { this.setAuthorizationHeaderHelperSync(context, true); return true; } } /** * Processes the request. * @param context The context of the request. * @param next The next policy in the pipeline. * @return A {@link Mono} containing the {@link HttpResponse}. */ @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) { return Mono.error(new RuntimeException( "Proof of possession token authentication is not permitted for non TLS-protected (HTTPS) endpoints.")); } HttpPipelineNextPolicy nextPolicy = next.clone(); return authorizeRequest(context).then(Mono.defer(next::process)).flatMap(httpResponse -> { String authHeader = httpResponse.getHeaderValue(HttpHeaderName.WWW_AUTHENTICATE); if (httpResponse.getStatusCode() == 401 && authHeader != null) { return authorizeRequestOnChallenge(context, httpResponse).flatMap(authorized -> { if (authorized) { httpResponse.close(); return nextPolicy.process(); } else { return Mono.just(httpResponse); } }); } else if (authHeader != null) { popNonce = AuthorizationChallengeParser.getChallengeParameterFromResponse(httpResponse, "PoP", "nonce"); } return Mono.just(httpResponse); }); } /** * Processes the request synchronously. * @param context The context of the request. * @param next The next policy in the pipeline. * @return The {@link HttpResponse}. 
*/ public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) { if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) { throw LOGGER.logExceptionAsError(new RuntimeException( "Proof of possession token authentication is not permitted for non TLS-protected (HTTPS) endpoints.")); } else { HttpPipelineNextSyncPolicy nextPolicy = next.clone(); this.authorizeRequestSync(context); HttpResponse httpResponse = next.processSync(); String authHeader = httpResponse.getHeaderValue(HttpHeaderName.WWW_AUTHENTICATE); if (httpResponse.getStatusCode() == 401 && authHeader != null) { if (this.authorizeRequestOnChallengeSync(context, httpResponse)) { httpResponse.close(); return nextPolicy.processSync(); } else { return httpResponse; } } else if (authHeader != null) { popNonce = AuthorizationChallengeParser.getChallengeParameterFromResponse(httpResponse, "PoP", "nonce"); return httpResponse; } else { return httpResponse; } } } private void setAuthorizationHeaderHelperSync(HttpPipelineCallContext context, boolean checkToForceFetchToken) { if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) { throw LOGGER.logExceptionAsError(new RuntimeException( "Proof of possession token authentication is not permitted for non TLS-protected (HTTPS) endpoints.")); } PopTokenRequestContext popTokenRequestContext = new PopTokenRequestContext().addScopes(this.scopes.get(0)) .setProofOfPossessionNonce(popNonce) .setResourceRequestUrl(context.getHttpRequest().getUrl()) .setResourceRequestMethod(context.getHttpRequest().getHttpMethod()); AccessToken token = this.cache.getTokenSync(popTokenRequestContext, checkToForceFetchToken); setAuthorizationHeader(context.getHttpRequest().getHeaders(), token.getToken()); } private static void setAuthorizationHeader(HttpHeaders headers, String token) { headers.set(HttpHeaderName.AUTHORIZATION, "Pop " + token); } }
Yes, it can be removed; I will remove it in the next commit.
// Asynchronously acquires a PoP token (when a nonce has been received) and stamps the
// Authorization header on the outgoing request. Requests over plain HTTP are rejected.
private Mono<Void> setAuthorizationHeaderHelper(HttpPipelineCallContext context, boolean checkToForceFetchToken) {
    if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) {
        throw LOGGER.logExceptionAsError(new RuntimeException(
            "Proof of possession token authentication is not permitted for non TLS-protected (HTTPS) endpoints."));
    }
    PopTokenRequestContext requestContext = new PopTokenRequestContext()
        .setScopes(this.scopes)
        .setParentRequestId(context.getHttpRequest().getHeaders().getValue(HttpHeaderName.X_MS_CLIENT_REQUEST_ID))
        .setProofOfPossessionNonce(popNonce)
        .setRequest(context.getHttpRequest());
    if (CoreUtils.isNullOrEmpty(popNonce)) {
        // No PoP nonce yet: nothing can be attached until a challenge supplies one.
        return Mono.empty();
    }
    return this.cache.getToken(requestContext, checkToForceFetchToken)
        .flatMap(token -> {
            setAuthorizationHeader(context.getHttpRequest().getHeaders(), token.getToken());
            return Mono.empty();
        });
}
"Proof of possession token authentication is not permitted for non TLS-protected (HTTPS) endpoints."));
// Asynchronously acquires a PoP token (when a nonce has been received) and stamps the
// Authorization header on the outgoing request. Requests over plain HTTP are rejected.
private Mono<Void> setAuthorizationHeaderHelper(HttpPipelineCallContext context, boolean checkToForceFetchToken) {
    if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) {
        throw LOGGER.logExceptionAsError(new RuntimeException(
            "Proof of possession token authentication is not permitted for non TLS-protected (HTTPS) endpoints."));
    }
    PopTokenRequestContext requestContext = new PopTokenRequestContext()
        .addScopes(this.scopes.get(0))
        .setProofOfPossessionNonce(popNonce)
        .setResourceRequestUrl(context.getHttpRequest().getUrl())
        .setResourceRequestMethod(context.getHttpRequest().getHttpMethod());
    if (CoreUtils.isNullOrEmpty(popNonce)) {
        // No PoP nonce yet: nothing can be attached until a challenge supplies one.
        return Mono.empty();
    }
    return this.cache.getToken(requestContext, checkToForceFetchToken)
        .flatMap(token -> {
            setAuthorizationHeader(context.getHttpRequest().getHeaders(), token.getToken());
            return Mono.empty();
        });
}
class PopTokenAuthenticationPolicy implements HttpPipelinePolicy { private static final ClientLogger LOGGER = new ClientLogger(PopTokenAuthenticationPolicy.class); private final List<String> scopes = new ArrayList<>(); private final AccessTokenCache cache; private String popNonce; /** * Creates a new instance of the PopTokenAuthenticationPolicy. * @param credential The credential to use for authentication. * @param scopes The scopes required for the token. */ public PopTokenAuthenticationPolicy(SupportsProofOfPossession credential, String... scopes) { Objects.requireNonNull(credential); this.scopes.clear(); this.scopes.addAll(Arrays.asList(scopes)); this.cache = new AccessTokenCache(credential); } /** * Authorizes the request. * @param context The context of the request. * @return A {@link Mono} containing {@link Void} . */ public Mono<Void> authorizeRequest(HttpPipelineCallContext context) { return this.scopes == null ? Mono.empty() : this.setAuthorizationHeaderHelper(context, false); } /** * Authorizes the request synchronously. * @param context The context of the request. */ public void authorizeRequestSync(HttpPipelineCallContext context) { this.setAuthorizationHeaderHelperSync(context, false); } /** * Authorizes the request on challenge. * @param context The context of the request. * @param response The response of the request. * @return A {@link Mono} containing a {@link Boolean} indicating if the request was authorized. */ public Mono<Boolean> authorizeRequestOnChallenge(HttpPipelineCallContext context, HttpResponse response) { popNonce = AuthorizationChallengeParser.getChallengeParameterFromResponse(response, "PoP", "nonce"); if (CoreUtils.isNullOrEmpty(popNonce)) { return Mono.just(false); } return this.scopes == null ? Mono.just(false) : this.setAuthorizationHeaderHelper(context, true).flatMap((ignored) -> Mono.just(true)); } /** * Authorizes the request on challenge synchronously. * @param context The context of the request. 
* @param response The response of the request. * @return A {@link Boolean} indicating if the request was authorized. */ public boolean authorizeRequestOnChallengeSync(HttpPipelineCallContext context, HttpResponse response) { popNonce = AuthorizationChallengeParser.getChallengeParameterFromResponse(response, "PoP", "nonce"); if (CoreUtils.isNullOrEmpty(popNonce)) { return false; } if (this.scopes == null) { return false; } else { this.setAuthorizationHeaderHelperSync(context, true); return true; } } /** * Processes the request. * @param context The context of the request. * @param next The next policy in the pipeline. * @return A {@link Mono} containing the {@link HttpResponse}. */ @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) { return Mono.error(new RuntimeException("token credentials require a URL using the HTTPS protocol scheme")); } HttpPipelineNextPolicy nextPolicy = next.clone(); return authorizeRequest(context).then(Mono.defer(next::process)).flatMap(httpResponse -> { String authHeader = httpResponse.getHeaderValue(HttpHeaderName.WWW_AUTHENTICATE); if (httpResponse.getStatusCode() == 401 && authHeader != null) { return authorizeRequestOnChallenge(context, httpResponse).flatMap(authorized -> { if (authorized) { httpResponse.close(); return nextPolicy.process(); } else { return Mono.just(httpResponse); } }); } else if (authHeader != null) { popNonce = AuthorizationChallengeParser.getChallengeParameterFromResponse(httpResponse, "PoP", "nonce"); } return Mono.just(httpResponse); }); } /** * Processes the request synchronously. * @param context The context of the request. * @param next The next policy in the pipeline. * @return The {@link HttpResponse}. 
*/ public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) { if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) { throw LOGGER.logExceptionAsError( new RuntimeException("token credentials require a URL using the HTTPS protocol scheme")); } else { HttpPipelineNextSyncPolicy nextPolicy = next.clone(); this.authorizeRequestSync(context); HttpResponse httpResponse = next.processSync(); String authHeader = httpResponse.getHeaderValue(HttpHeaderName.WWW_AUTHENTICATE); if (httpResponse.getStatusCode() == 401 && authHeader != null) { if (this.authorizeRequestOnChallengeSync(context, httpResponse)) { httpResponse.close(); return nextPolicy.processSync(); } else { return httpResponse; } } else if (authHeader != null) { popNonce = AuthorizationChallengeParser.getChallengeParameterFromResponse(httpResponse, "PoP", "nonce"); return httpResponse; } else { return httpResponse; } } } private void setAuthorizationHeaderHelperSync(HttpPipelineCallContext context, boolean checkToForceFetchToken) { if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) { throw LOGGER.logExceptionAsError(new RuntimeException( "Proof of possession token authentication is not permitted for non TLS-protected (HTTPS) endpoints.")); } PopTokenRequestContext popTokenRequestContext = new PopTokenRequestContext().setScopes(this.scopes) .setParentRequestId(context.getHttpRequest().getHeaders().getValue(HttpHeaderName.X_MS_CLIENT_REQUEST_ID)) .setProofOfPossessionNonce(popNonce); AccessToken token = this.cache.getTokenSync(popTokenRequestContext, checkToForceFetchToken); setAuthorizationHeader(context.getHttpRequest().getHeaders(), token.getToken()); } private static void setAuthorizationHeader(HttpHeaders headers, String token) { headers.set(HttpHeaderName.AUTHORIZATION, "Pop " + token); } }
class PopTokenAuthenticationPolicy implements HttpPipelinePolicy { private static final ClientLogger LOGGER = new ClientLogger(PopTokenAuthenticationPolicy.class); private final List<String> scopes = new ArrayList<>(); private final AccessTokenCache cache; private String popNonce; /** * Creates a new instance of the PopTokenAuthenticationPolicy. * @param credential The credential to use for authentication. * @param scopes The scopes required for the token. */ public PopTokenAuthenticationPolicy(TokenCredential credential, String... scopes) { Objects.requireNonNull(credential); this.scopes.clear(); this.scopes.addAll(Arrays.asList(scopes)); this.cache = new AccessTokenCache(credential); } /** * Authorizes the request. * @param context The context of the request. * @return A {@link Mono} containing {@link Void} . */ public Mono<Void> authorizeRequest(HttpPipelineCallContext context) { return this.scopes == null ? Mono.empty() : this.setAuthorizationHeaderHelper(context, false); } /** * Authorizes the request synchronously. * @param context The context of the request. */ public void authorizeRequestSync(HttpPipelineCallContext context) { this.setAuthorizationHeaderHelperSync(context, false); } /** * Authorizes the request on challenge. * @param context The context of the request. * @param response The response of the request. * @return A {@link Mono} containing a {@link Boolean} indicating if the request was authorized. */ public Mono<Boolean> authorizeRequestOnChallenge(HttpPipelineCallContext context, HttpResponse response) { popNonce = getChallengeParameterFromResponse(response, "PoP", "nonce"); if (CoreUtils.isNullOrEmpty(popNonce)) { return Mono.just(false); } return this.scopes == null ? Mono.just(false) : this.setAuthorizationHeaderHelper(context, true).flatMap((ignored) -> Mono.just(true)); } /** * Authorizes the request on challenge synchronously. * @param context The context of the request. * @param response The response of the request. 
* @return A {@link Boolean} indicating if the request was authorized. */ public boolean authorizeRequestOnChallengeSync(HttpPipelineCallContext context, HttpResponse response) { popNonce = AuthorizationChallengeParser.getChallengeParameterFromResponse(response, "PoP", "nonce"); if (CoreUtils.isNullOrEmpty(popNonce)) { return false; } if (this.scopes == null) { return false; } else { this.setAuthorizationHeaderHelperSync(context, true); return true; } } /** * Processes the request. * @param context The context of the request. * @param next The next policy in the pipeline. * @return A {@link Mono} containing the {@link HttpResponse}. */ @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) { return Mono.error(new RuntimeException( "Proof of possession token authentication is not permitted for non TLS-protected (HTTPS) endpoints.")); } HttpPipelineNextPolicy nextPolicy = next.clone(); return authorizeRequest(context).then(Mono.defer(next::process)).flatMap(httpResponse -> { String authHeader = httpResponse.getHeaderValue(HttpHeaderName.WWW_AUTHENTICATE); if (httpResponse.getStatusCode() == 401 && authHeader != null) { return authorizeRequestOnChallenge(context, httpResponse).flatMap(authorized -> { if (authorized) { httpResponse.close(); return nextPolicy.process(); } else { return Mono.just(httpResponse); } }); } else if (authHeader != null) { popNonce = AuthorizationChallengeParser.getChallengeParameterFromResponse(httpResponse, "PoP", "nonce"); } return Mono.just(httpResponse); }); } /** * Processes the request synchronously. * @param context The context of the request. * @param next The next policy in the pipeline. * @return The {@link HttpResponse}. 
*/ public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) { if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) { throw LOGGER.logExceptionAsError(new RuntimeException( "Proof of possession token authentication is not permitted for non TLS-protected (HTTPS) endpoints.")); } else { HttpPipelineNextSyncPolicy nextPolicy = next.clone(); this.authorizeRequestSync(context); HttpResponse httpResponse = next.processSync(); String authHeader = httpResponse.getHeaderValue(HttpHeaderName.WWW_AUTHENTICATE); if (httpResponse.getStatusCode() == 401 && authHeader != null) { if (this.authorizeRequestOnChallengeSync(context, httpResponse)) { httpResponse.close(); return nextPolicy.processSync(); } else { return httpResponse; } } else if (authHeader != null) { popNonce = AuthorizationChallengeParser.getChallengeParameterFromResponse(httpResponse, "PoP", "nonce"); return httpResponse; } else { return httpResponse; } } } private void setAuthorizationHeaderHelperSync(HttpPipelineCallContext context, boolean checkToForceFetchToken) { if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) { throw LOGGER.logExceptionAsError(new RuntimeException( "Proof of possession token authentication is not permitted for non TLS-protected (HTTPS) endpoints.")); } PopTokenRequestContext popTokenRequestContext = new PopTokenRequestContext().addScopes(this.scopes.get(0)) .setProofOfPossessionNonce(popNonce) .setResourceRequestUrl(context.getHttpRequest().getUrl()) .setResourceRequestMethod(context.getHttpRequest().getHttpMethod()); AccessToken token = this.cache.getTokenSync(popTokenRequestContext, checkToForceFetchToken); setAuthorizationHeader(context.getHttpRequest().getHeaders(), token.getToken()); } private static void setAuthorizationHeader(HttpHeaders headers, String token) { headers.set(HttpHeaderName.AUTHORIZATION, "Pop " + token); } }
On second look, the authorizeRequest and authorizeRequestOnChallenge public APIs call this method directly as well, so we need to retain this check here. I have also aligned the error message so it is the same in all spots.
private Mono<Void> setAuthorizationHeaderHelper(HttpPipelineCallContext context, boolean checkToForceFetchToken) { if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) { throw LOGGER.logExceptionAsError(new RuntimeException( "Proof of possession token authentication is not permitted for non TLS-protected (HTTPS) endpoints.")); } PopTokenRequestContext popTokenRequestContext = new PopTokenRequestContext().setScopes(this.scopes) .setParentRequestId(context.getHttpRequest().getHeaders().getValue(HttpHeaderName.X_MS_CLIENT_REQUEST_ID)) .setProofOfPossessionNonce(popNonce) .setRequest(context.getHttpRequest()); if (!CoreUtils.isNullOrEmpty(popNonce)) { return this.cache.getToken(popTokenRequestContext, checkToForceFetchToken).flatMap((token) -> { setAuthorizationHeader(context.getHttpRequest().getHeaders(), token.getToken()); return Mono.empty(); }); } return Mono.empty(); }
"Proof of possession token authentication is not permitted for non TLS-protected (HTTPS) endpoints."));
private Mono<Void> setAuthorizationHeaderHelper(HttpPipelineCallContext context, boolean checkToForceFetchToken) { if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) { throw LOGGER.logExceptionAsError(new RuntimeException( "Proof of possession token authentication is not permitted for non TLS-protected (HTTPS) endpoints.")); } PopTokenRequestContext popTokenRequestContext = new PopTokenRequestContext().addScopes(this.scopes.get(0)) .setProofOfPossessionNonce(popNonce) .setResourceRequestUrl(context.getHttpRequest().getUrl()) .setResourceRequestMethod(context.getHttpRequest().getHttpMethod()); if (!CoreUtils.isNullOrEmpty(popNonce)) { return this.cache.getToken(popTokenRequestContext, checkToForceFetchToken).flatMap((token) -> { setAuthorizationHeader(context.getHttpRequest().getHeaders(), token.getToken()); return Mono.empty(); }); } return Mono.empty(); }
class PopTokenAuthenticationPolicy implements HttpPipelinePolicy { private static final ClientLogger LOGGER = new ClientLogger(PopTokenAuthenticationPolicy.class); private final List<String> scopes = new ArrayList<>(); private final AccessTokenCache cache; private String popNonce; /** * Creates a new instance of the PopTokenAuthenticationPolicy. * @param credential The credential to use for authentication. * @param scopes The scopes required for the token. */ public PopTokenAuthenticationPolicy(SupportsProofOfPossession credential, String... scopes) { Objects.requireNonNull(credential); this.scopes.clear(); this.scopes.addAll(Arrays.asList(scopes)); this.cache = new AccessTokenCache(credential); } /** * Authorizes the request. * @param context The context of the request. * @return A {@link Mono} containing {@link Void} . */ public Mono<Void> authorizeRequest(HttpPipelineCallContext context) { return this.scopes == null ? Mono.empty() : this.setAuthorizationHeaderHelper(context, false); } /** * Authorizes the request synchronously. * @param context The context of the request. */ public void authorizeRequestSync(HttpPipelineCallContext context) { this.setAuthorizationHeaderHelperSync(context, false); } /** * Authorizes the request on challenge. * @param context The context of the request. * @param response The response of the request. * @return A {@link Mono} containing a {@link Boolean} indicating if the request was authorized. */ public Mono<Boolean> authorizeRequestOnChallenge(HttpPipelineCallContext context, HttpResponse response) { popNonce = AuthorizationChallengeParser.getChallengeParameterFromResponse(response, "PoP", "nonce"); if (CoreUtils.isNullOrEmpty(popNonce)) { return Mono.just(false); } return this.scopes == null ? Mono.just(false) : this.setAuthorizationHeaderHelper(context, true).flatMap((ignored) -> Mono.just(true)); } /** * Authorizes the request on challenge synchronously. * @param context The context of the request. 
* @param response The response of the request. * @return A {@link Boolean} indicating if the request was authorized. */ public boolean authorizeRequestOnChallengeSync(HttpPipelineCallContext context, HttpResponse response) { popNonce = AuthorizationChallengeParser.getChallengeParameterFromResponse(response, "PoP", "nonce"); if (CoreUtils.isNullOrEmpty(popNonce)) { return false; } if (this.scopes == null) { return false; } else { this.setAuthorizationHeaderHelperSync(context, true); return true; } } /** * Processes the request. * @param context The context of the request. * @param next The next policy in the pipeline. * @return A {@link Mono} containing the {@link HttpResponse}. */ @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) { return Mono.error(new RuntimeException("token credentials require a URL using the HTTPS protocol scheme")); } HttpPipelineNextPolicy nextPolicy = next.clone(); return authorizeRequest(context).then(Mono.defer(next::process)).flatMap(httpResponse -> { String authHeader = httpResponse.getHeaderValue(HttpHeaderName.WWW_AUTHENTICATE); if (httpResponse.getStatusCode() == 401 && authHeader != null) { return authorizeRequestOnChallenge(context, httpResponse).flatMap(authorized -> { if (authorized) { httpResponse.close(); return nextPolicy.process(); } else { return Mono.just(httpResponse); } }); } else if (authHeader != null) { popNonce = AuthorizationChallengeParser.getChallengeParameterFromResponse(httpResponse, "PoP", "nonce"); } return Mono.just(httpResponse); }); } /** * Processes the request synchronously. * @param context The context of the request. * @param next The next policy in the pipeline. * @return The {@link HttpResponse}. 
*/ public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) { if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) { throw LOGGER.logExceptionAsError( new RuntimeException("token credentials require a URL using the HTTPS protocol scheme")); } else { HttpPipelineNextSyncPolicy nextPolicy = next.clone(); this.authorizeRequestSync(context); HttpResponse httpResponse = next.processSync(); String authHeader = httpResponse.getHeaderValue(HttpHeaderName.WWW_AUTHENTICATE); if (httpResponse.getStatusCode() == 401 && authHeader != null) { if (this.authorizeRequestOnChallengeSync(context, httpResponse)) { httpResponse.close(); return nextPolicy.processSync(); } else { return httpResponse; } } else if (authHeader != null) { popNonce = AuthorizationChallengeParser.getChallengeParameterFromResponse(httpResponse, "PoP", "nonce"); return httpResponse; } else { return httpResponse; } } } private void setAuthorizationHeaderHelperSync(HttpPipelineCallContext context, boolean checkToForceFetchToken) { if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) { throw LOGGER.logExceptionAsError(new RuntimeException( "Proof of possession token authentication is not permitted for non TLS-protected (HTTPS) endpoints.")); } PopTokenRequestContext popTokenRequestContext = new PopTokenRequestContext().setScopes(this.scopes) .setParentRequestId(context.getHttpRequest().getHeaders().getValue(HttpHeaderName.X_MS_CLIENT_REQUEST_ID)) .setProofOfPossessionNonce(popNonce); AccessToken token = this.cache.getTokenSync(popTokenRequestContext, checkToForceFetchToken); setAuthorizationHeader(context.getHttpRequest().getHeaders(), token.getToken()); } private static void setAuthorizationHeader(HttpHeaders headers, String token) { headers.set(HttpHeaderName.AUTHORIZATION, "Pop " + token); } }
class PopTokenAuthenticationPolicy implements HttpPipelinePolicy { private static final ClientLogger LOGGER = new ClientLogger(PopTokenAuthenticationPolicy.class); private final List<String> scopes = new ArrayList<>(); private final AccessTokenCache cache; private String popNonce; /** * Creates a new instance of the PopTokenAuthenticationPolicy. * @param credential The credential to use for authentication. * @param scopes The scopes required for the token. */ public PopTokenAuthenticationPolicy(TokenCredential credential, String... scopes) { Objects.requireNonNull(credential); this.scopes.clear(); this.scopes.addAll(Arrays.asList(scopes)); this.cache = new AccessTokenCache(credential); } /** * Authorizes the request. * @param context The context of the request. * @return A {@link Mono} containing {@link Void} . */ public Mono<Void> authorizeRequest(HttpPipelineCallContext context) { return this.scopes == null ? Mono.empty() : this.setAuthorizationHeaderHelper(context, false); } /** * Authorizes the request synchronously. * @param context The context of the request. */ public void authorizeRequestSync(HttpPipelineCallContext context) { this.setAuthorizationHeaderHelperSync(context, false); } /** * Authorizes the request on challenge. * @param context The context of the request. * @param response The response of the request. * @return A {@link Mono} containing a {@link Boolean} indicating if the request was authorized. */ public Mono<Boolean> authorizeRequestOnChallenge(HttpPipelineCallContext context, HttpResponse response) { popNonce = getChallengeParameterFromResponse(response, "PoP", "nonce"); if (CoreUtils.isNullOrEmpty(popNonce)) { return Mono.just(false); } return this.scopes == null ? Mono.just(false) : this.setAuthorizationHeaderHelper(context, true).flatMap((ignored) -> Mono.just(true)); } /** * Authorizes the request on challenge synchronously. * @param context The context of the request. * @param response The response of the request. 
* @return A {@link Boolean} indicating if the request was authorized. */ public boolean authorizeRequestOnChallengeSync(HttpPipelineCallContext context, HttpResponse response) { popNonce = AuthorizationChallengeParser.getChallengeParameterFromResponse(response, "PoP", "nonce"); if (CoreUtils.isNullOrEmpty(popNonce)) { return false; } if (this.scopes == null) { return false; } else { this.setAuthorizationHeaderHelperSync(context, true); return true; } } /** * Processes the request. * @param context The context of the request. * @param next The next policy in the pipeline. * @return A {@link Mono} containing the {@link HttpResponse}. */ @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) { return Mono.error(new RuntimeException( "Proof of possession token authentication is not permitted for non TLS-protected (HTTPS) endpoints.")); } HttpPipelineNextPolicy nextPolicy = next.clone(); return authorizeRequest(context).then(Mono.defer(next::process)).flatMap(httpResponse -> { String authHeader = httpResponse.getHeaderValue(HttpHeaderName.WWW_AUTHENTICATE); if (httpResponse.getStatusCode() == 401 && authHeader != null) { return authorizeRequestOnChallenge(context, httpResponse).flatMap(authorized -> { if (authorized) { httpResponse.close(); return nextPolicy.process(); } else { return Mono.just(httpResponse); } }); } else if (authHeader != null) { popNonce = AuthorizationChallengeParser.getChallengeParameterFromResponse(httpResponse, "PoP", "nonce"); } return Mono.just(httpResponse); }); } /** * Processes the request synchronously. * @param context The context of the request. * @param next The next policy in the pipeline. * @return The {@link HttpResponse}. 
*/ public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) { if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) { throw LOGGER.logExceptionAsError(new RuntimeException( "Proof of possession token authentication is not permitted for non TLS-protected (HTTPS) endpoints.")); } else { HttpPipelineNextSyncPolicy nextPolicy = next.clone(); this.authorizeRequestSync(context); HttpResponse httpResponse = next.processSync(); String authHeader = httpResponse.getHeaderValue(HttpHeaderName.WWW_AUTHENTICATE); if (httpResponse.getStatusCode() == 401 && authHeader != null) { if (this.authorizeRequestOnChallengeSync(context, httpResponse)) { httpResponse.close(); return nextPolicy.processSync(); } else { return httpResponse; } } else if (authHeader != null) { popNonce = AuthorizationChallengeParser.getChallengeParameterFromResponse(httpResponse, "PoP", "nonce"); return httpResponse; } else { return httpResponse; } } } private void setAuthorizationHeaderHelperSync(HttpPipelineCallContext context, boolean checkToForceFetchToken) { if (!"https".equals(context.getHttpRequest().getUrl().getProtocol())) { throw LOGGER.logExceptionAsError(new RuntimeException( "Proof of possession token authentication is not permitted for non TLS-protected (HTTPS) endpoints.")); } PopTokenRequestContext popTokenRequestContext = new PopTokenRequestContext().addScopes(this.scopes.get(0)) .setProofOfPossessionNonce(popNonce) .setResourceRequestUrl(context.getHttpRequest().getUrl()) .setResourceRequestMethod(context.getHttpRequest().getHttpMethod()); AccessToken token = this.cache.getTokenSync(popTokenRequestContext, checkToForceFetchToken); setAuthorizationHeader(context.getHttpRequest().getHeaders(), token.getToken()); } private static void setAuthorizationHeader(HttpHeaders headers, String token) { headers.set(HttpHeaderName.AUTHORIZATION, "Pop " + token); } }
Curious — why do we change the region?
public void testVirtualMachineUpdate() { final String vmname = "javavm1"; final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.7.sh " + password(); List<String> fileUris = new ArrayList<>(); fileUris.add(mySqlInstallScript); VirtualMachine vm = computeManager.virtualMachines() .define(vmname) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .create(); VirtualMachine.Update vmUpdate = vm.update(); Assertions.assertFalse(this.isVirtualMachineModifiedDuringUpdate(vm)); vmUpdate = vmUpdate.defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", fileUris) .withPublicSetting("commandToExecute", installCommand) .attach(); Assertions.assertFalse(this.isVirtualMachineModifiedDuringUpdate(vm)); vmUpdate = vmUpdate.withOSDiskCaching(CachingTypes.READ_ONLY); Assertions.assertTrue(this.isVirtualMachineModifiedDuringUpdate(vm)); vmUpdate = vmUpdate.withOSDiskCaching(CachingTypes.READ_WRITE); Assertions.assertFalse(this.isVirtualMachineModifiedDuringUpdate(vm)); vmUpdate = vmUpdate.withTag("key1", "value1"); Assertions.assertTrue(this.isVirtualMachineModifiedDuringUpdate(vm)); vm = vmUpdate.apply(); Map<String, VirtualMachineExtension> extensions = vm.listExtensions(); Assertions.assertNotNull(extensions); Assertions.assertFalse(extensions.isEmpty()); VirtualMachineExtension customScriptExtension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(customScriptExtension); Assertions.assertEquals("value1", vm.tags().get("key1")); vm.update() 
.withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR) .apply(); Assertions.assertTrue(this.isVirtualMachineModifiedDuringUpdate(vm)); Assertions.assertEquals(ResourceIdentityType.SYSTEM_ASSIGNED, vm.managedServiceIdentityType()); Assertions.assertNotNull(vm.systemAssignedManagedServiceIdentityPrincipalId()); }
.withRegion(Region.US_WEST3)
public void testVirtualMachineUpdate() { final String vmname = "javavm1"; final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.7.sh " + password(); List<String> fileUris = new ArrayList<>(); fileUris.add(mySqlInstallScript); VirtualMachine vm = computeManager.virtualMachines() .define(vmname) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .create(); VirtualMachine.Update vmUpdate = vm.update(); Assertions.assertFalse(this.isVirtualMachineModifiedDuringUpdate(vm)); vmUpdate = vmUpdate.defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", fileUris) .withPublicSetting("commandToExecute", installCommand) .attach(); Assertions.assertFalse(this.isVirtualMachineModifiedDuringUpdate(vm)); vmUpdate = vmUpdate.withOSDiskCaching(CachingTypes.READ_ONLY); Assertions.assertTrue(this.isVirtualMachineModifiedDuringUpdate(vm)); vmUpdate = vmUpdate.withOSDiskCaching(CachingTypes.READ_WRITE); Assertions.assertFalse(this.isVirtualMachineModifiedDuringUpdate(vm)); vmUpdate = vmUpdate.withTag("key1", "value1"); Assertions.assertTrue(this.isVirtualMachineModifiedDuringUpdate(vm)); vm = vmUpdate.apply(); Map<String, VirtualMachineExtension> extensions = vm.listExtensions(); Assertions.assertNotNull(extensions); Assertions.assertFalse(extensions.isEmpty()); VirtualMachineExtension customScriptExtension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(customScriptExtension); Assertions.assertEquals("value1", vm.tags().get("key1")); vm.update() 
.withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR) .apply(); Assertions.assertTrue(this.isVirtualMachineModifiedDuringUpdate(vm)); Assertions.assertEquals(ResourceIdentityType.SYSTEM_ASSIGNED, vm.managedServiceIdentityType()); Assertions.assertNotNull(vm.systemAssignedManagedServiceIdentityPrincipalId()); }
class VirtualMachineUpdateTests extends ComputeManagementTest { private String rgName = ""; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { resourceManager.resourceGroups().beginDeleteByName(rgName); } @Test private boolean isVirtualMachineModifiedDuringUpdate(VirtualMachine vm) { VirtualMachineImpl vmImpl = (VirtualMachineImpl) vm; return vmImpl.isVirtualMachineModifiedDuringUpdate(vmImpl.deepCopyInnerToUpdateParameter()); } }
class VirtualMachineUpdateTests extends ComputeManagementTest { private String rgName = ""; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { resourceManager.resourceGroups().beginDeleteByName(rgName); } @Test private boolean isVirtualMachineModifiedDuringUpdate(VirtualMachine vm) { VirtualMachineImpl vmImpl = (VirtualMachineImpl) vm; return vmImpl.isVirtualMachineModifiedDuringUpdate(vmImpl.deepCopyInnerToUpdateParameter()); } }
Not enough resources — US_EAST seems to be among the most heavily used regions.
public void testVirtualMachineUpdate() { final String vmname = "javavm1"; final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.7.sh " + password(); List<String> fileUris = new ArrayList<>(); fileUris.add(mySqlInstallScript); VirtualMachine vm = computeManager.virtualMachines() .define(vmname) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .create(); VirtualMachine.Update vmUpdate = vm.update(); Assertions.assertFalse(this.isVirtualMachineModifiedDuringUpdate(vm)); vmUpdate = vmUpdate.defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", fileUris) .withPublicSetting("commandToExecute", installCommand) .attach(); Assertions.assertFalse(this.isVirtualMachineModifiedDuringUpdate(vm)); vmUpdate = vmUpdate.withOSDiskCaching(CachingTypes.READ_ONLY); Assertions.assertTrue(this.isVirtualMachineModifiedDuringUpdate(vm)); vmUpdate = vmUpdate.withOSDiskCaching(CachingTypes.READ_WRITE); Assertions.assertFalse(this.isVirtualMachineModifiedDuringUpdate(vm)); vmUpdate = vmUpdate.withTag("key1", "value1"); Assertions.assertTrue(this.isVirtualMachineModifiedDuringUpdate(vm)); vm = vmUpdate.apply(); Map<String, VirtualMachineExtension> extensions = vm.listExtensions(); Assertions.assertNotNull(extensions); Assertions.assertFalse(extensions.isEmpty()); VirtualMachineExtension customScriptExtension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(customScriptExtension); Assertions.assertEquals("value1", vm.tags().get("key1")); vm.update() 
.withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR) .apply(); Assertions.assertTrue(this.isVirtualMachineModifiedDuringUpdate(vm)); Assertions.assertEquals(ResourceIdentityType.SYSTEM_ASSIGNED, vm.managedServiceIdentityType()); Assertions.assertNotNull(vm.systemAssignedManagedServiceIdentityPrincipalId()); }
.withRegion(Region.US_WEST3)
public void testVirtualMachineUpdate() { final String vmname = "javavm1"; final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.7.sh " + password(); List<String> fileUris = new ArrayList<>(); fileUris.add(mySqlInstallScript); VirtualMachine vm = computeManager.virtualMachines() .define(vmname) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .create(); VirtualMachine.Update vmUpdate = vm.update(); Assertions.assertFalse(this.isVirtualMachineModifiedDuringUpdate(vm)); vmUpdate = vmUpdate.defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", fileUris) .withPublicSetting("commandToExecute", installCommand) .attach(); Assertions.assertFalse(this.isVirtualMachineModifiedDuringUpdate(vm)); vmUpdate = vmUpdate.withOSDiskCaching(CachingTypes.READ_ONLY); Assertions.assertTrue(this.isVirtualMachineModifiedDuringUpdate(vm)); vmUpdate = vmUpdate.withOSDiskCaching(CachingTypes.READ_WRITE); Assertions.assertFalse(this.isVirtualMachineModifiedDuringUpdate(vm)); vmUpdate = vmUpdate.withTag("key1", "value1"); Assertions.assertTrue(this.isVirtualMachineModifiedDuringUpdate(vm)); vm = vmUpdate.apply(); Map<String, VirtualMachineExtension> extensions = vm.listExtensions(); Assertions.assertNotNull(extensions); Assertions.assertFalse(extensions.isEmpty()); VirtualMachineExtension customScriptExtension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(customScriptExtension); Assertions.assertEquals("value1", vm.tags().get("key1")); vm.update() 
.withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR) .apply(); Assertions.assertTrue(this.isVirtualMachineModifiedDuringUpdate(vm)); Assertions.assertEquals(ResourceIdentityType.SYSTEM_ASSIGNED, vm.managedServiceIdentityType()); Assertions.assertNotNull(vm.systemAssignedManagedServiceIdentityPrincipalId()); }
class VirtualMachineUpdateTests extends ComputeManagementTest { private String rgName = ""; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { resourceManager.resourceGroups().beginDeleteByName(rgName); } @Test private boolean isVirtualMachineModifiedDuringUpdate(VirtualMachine vm) { VirtualMachineImpl vmImpl = (VirtualMachineImpl) vm; return vmImpl.isVirtualMachineModifiedDuringUpdate(vmImpl.deepCopyInnerToUpdateParameter()); } }
class VirtualMachineUpdateTests extends ComputeManagementTest { private String rgName = ""; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { resourceManager.resourceGroups().beginDeleteByName(rgName); } @Test private boolean isVirtualMachineModifiedDuringUpdate(VirtualMachine vm) { VirtualMachineImpl vmImpl = (VirtualMachineImpl) vm; return vmImpl.isVirtualMachineModifiedDuringUpdate(vmImpl.deepCopyInnerToUpdateParameter()); } }
ubuntu 14 seems no longer available.
public void testVirtualMachineUpdate() { final String vmname = "javavm1"; final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.7.sh " + password(); List<String> fileUris = new ArrayList<>(); fileUris.add(mySqlInstallScript); VirtualMachine vm = computeManager.virtualMachines() .define(vmname) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .create(); VirtualMachine.Update vmUpdate = vm.update(); Assertions.assertFalse(this.isVirtualMachineModifiedDuringUpdate(vm)); vmUpdate = vmUpdate.defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", fileUris) .withPublicSetting("commandToExecute", installCommand) .attach(); Assertions.assertFalse(this.isVirtualMachineModifiedDuringUpdate(vm)); vmUpdate = vmUpdate.withOSDiskCaching(CachingTypes.READ_ONLY); Assertions.assertTrue(this.isVirtualMachineModifiedDuringUpdate(vm)); vmUpdate = vmUpdate.withOSDiskCaching(CachingTypes.READ_WRITE); Assertions.assertFalse(this.isVirtualMachineModifiedDuringUpdate(vm)); vmUpdate = vmUpdate.withTag("key1", "value1"); Assertions.assertTrue(this.isVirtualMachineModifiedDuringUpdate(vm)); vm = vmUpdate.apply(); Map<String, VirtualMachineExtension> extensions = vm.listExtensions(); Assertions.assertNotNull(extensions); Assertions.assertFalse(extensions.isEmpty()); VirtualMachineExtension customScriptExtension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(customScriptExtension); Assertions.assertEquals("value1", vm.tags().get("key1")); vm.update() 
.withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR) .apply(); Assertions.assertTrue(this.isVirtualMachineModifiedDuringUpdate(vm)); Assertions.assertEquals(ResourceIdentityType.SYSTEM_ASSIGNED, vm.managedServiceIdentityType()); Assertions.assertNotNull(vm.systemAssignedManagedServiceIdentityPrincipalId()); }
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
public void testVirtualMachineUpdate() { final String vmname = "javavm1"; final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.7.sh " + password(); List<String> fileUris = new ArrayList<>(); fileUris.add(mySqlInstallScript); VirtualMachine vm = computeManager.virtualMachines() .define(vmname) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .create(); VirtualMachine.Update vmUpdate = vm.update(); Assertions.assertFalse(this.isVirtualMachineModifiedDuringUpdate(vm)); vmUpdate = vmUpdate.defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", fileUris) .withPublicSetting("commandToExecute", installCommand) .attach(); Assertions.assertFalse(this.isVirtualMachineModifiedDuringUpdate(vm)); vmUpdate = vmUpdate.withOSDiskCaching(CachingTypes.READ_ONLY); Assertions.assertTrue(this.isVirtualMachineModifiedDuringUpdate(vm)); vmUpdate = vmUpdate.withOSDiskCaching(CachingTypes.READ_WRITE); Assertions.assertFalse(this.isVirtualMachineModifiedDuringUpdate(vm)); vmUpdate = vmUpdate.withTag("key1", "value1"); Assertions.assertTrue(this.isVirtualMachineModifiedDuringUpdate(vm)); vm = vmUpdate.apply(); Map<String, VirtualMachineExtension> extensions = vm.listExtensions(); Assertions.assertNotNull(extensions); Assertions.assertFalse(extensions.isEmpty()); VirtualMachineExtension customScriptExtension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(customScriptExtension); Assertions.assertEquals("value1", vm.tags().get("key1")); vm.update() 
.withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR) .apply(); Assertions.assertTrue(this.isVirtualMachineModifiedDuringUpdate(vm)); Assertions.assertEquals(ResourceIdentityType.SYSTEM_ASSIGNED, vm.managedServiceIdentityType()); Assertions.assertNotNull(vm.systemAssignedManagedServiceIdentityPrincipalId()); }
class VirtualMachineUpdateTests extends ComputeManagementTest { private String rgName = ""; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { resourceManager.resourceGroups().beginDeleteByName(rgName); } @Test private boolean isVirtualMachineModifiedDuringUpdate(VirtualMachine vm) { VirtualMachineImpl vmImpl = (VirtualMachineImpl) vm; return vmImpl.isVirtualMachineModifiedDuringUpdate(vmImpl.deepCopyInnerToUpdateParameter()); } }
class VirtualMachineUpdateTests extends ComputeManagementTest { private String rgName = ""; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { resourceManager.resourceGroups().beginDeleteByName(rgName); } @Test private boolean isVirtualMachineModifiedDuringUpdate(VirtualMachine vm) { VirtualMachineImpl vmImpl = (VirtualMachineImpl) vm; return vmImpl.isVirtualMachineModifiedDuringUpdate(vmImpl.deepCopyInnerToUpdateParameter()); } }
`Assertions.assertInstanceOf` returns a value of the object mapped to the type if you want to further cleanup code by removing the type cast in the line below.
public void consumeStorageBlobDeletedEventWithExtraProperty() throws IOException { String jsonData = getTestPayloadFromFile("StorageBlobDeletedEventWithExtraProperty.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertInstanceOf(StorageBlobDeletedEventData.class, toSystemEventData(events[0])); StorageBlobDeletedEventData eventData = (StorageBlobDeletedEventData) toSystemEventData(events[0]); assertEquals("https: }
assertInstanceOf(StorageBlobDeletedEventData.class, toSystemEventData(events[0]));
public void consumeStorageBlobDeletedEventWithExtraProperty() throws IOException { String jsonData = getTestPayloadFromFile("StorageBlobDeletedEventWithExtraProperty.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); StorageBlobDeletedEventData eventData = assertInstanceOf(StorageBlobDeletedEventData.class, toSystemEventData(events[0])); assertEquals("https: }
class DeserializationTests { static <T> Object toSystemEventData(EventGridEvent event) { return getSystemEventData(event.getData(), event.getEventType()); } static <T> Object toSystemEventData(CloudEvent event) { return getSystemEventData(event.getData(), event.getType()); } static Object getSystemEventData(BinaryData data, String eventType) { if (SystemEventNames.getSystemEventMappings().containsKey(eventType)) { return data .toObject(TypeReference.createInstance(SystemEventNames.getSystemEventMappings().get(eventType))); } return null; } @ParameterizedTest @MethodSource("getObjectsForRoundTrip") public void testEventGridRoundTripStreamSerialization(BinaryData payload) { EventGridEvent eventGridEvent = new EventGridEvent("subject", "eventType", payload, "dataVersion"); ByteArrayOutputStream stream = new ByteArrayOutputStream(); try { JsonWriter writer = JsonProviders.createWriter(stream); eventGridEvent.toJson(writer); writer.flush(); try (JsonReader reader = JsonProviders.createReader(stream.toByteArray())) { EventGridEvent deserializedEvent = EventGridEvent.fromJson(reader); assertEquals(eventGridEvent.getSubject(), deserializedEvent.getSubject()); assertEquals(eventGridEvent.getEventType(), deserializedEvent.getEventType()); assertArrayEquals(eventGridEvent.getData().toBytes(), deserializedEvent.getData().toBytes()); assertEquals(eventGridEvent.getDataVersion(), deserializedEvent.getDataVersion()); } } catch (IOException e) { throw new RuntimeException(e); } } private static Stream<Arguments> getObjectsForRoundTrip() { return Stream.of( Arguments.of(BinaryData.fromObject(1)), Arguments.of(BinaryData.fromObject("data")), Arguments.of(BinaryData.fromString("{\"data\":\"data\"}")), Arguments.of(BinaryData.fromObject(true)) ); } @Test @Test public void consumeEventGridEventWithoutArrayBrackets() throws IOException { String jsonData = getTestPayloadFromFile("EventGridEventNoArray.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new 
EventGridEvent[0]); assertNotNull(events); assertEquals(1, events.length); assertInstanceOf(StorageBlobDeletedEventData.class, toSystemEventData(events[0])); StorageBlobDeletedEventData eventData = (StorageBlobDeletedEventData) toSystemEventData(events[0]); assertEquals("https: } @Test public void consumeEventGridEventWithNullData() throws IOException { String jsonData = getTestPayloadFromFile("EventGridNullData.json"); assertThrows(IllegalArgumentException.class, () -> { EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); }); } @Test public void consumeCustomEvents() throws IOException { String jsonData = getTestPayloadFromFile("CustomEvents.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertEquals(1, events.length); assertNotNull(events[0].getData().toObject(TypeReference.createInstance(ContosoItemReceivedEventData.class))); ContosoItemReceivedEventData eventData = events[0].getData().toObject(TypeReference.createInstance(ContosoItemReceivedEventData.class)); assertEquals("512d38b6-c7b8-40c8-89fe-f46f9e9622b6", eventData.getItemSku()); } @Test public void consumeCustomEventWithArrayData() throws IOException { String jsonData = getTestPayloadFromFile("CustomEventWithArrayData.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertEquals(1, events.length); List<ContosoItemReceivedEventData> eventData = events[0].getData().toObject(new TypeReference<List<ContosoItemReceivedEventData>>() {}); assertNotNull(eventData); assertEquals("512d38b6-c7b8-40c8-89fe-f46f9e9622b6", (eventData.get(0)).getItemSku()); } @Test public void consumeCustomEventWithBooleanData() throws IOException { String jsonData = getTestPayloadFromFile("CustomEventWithBooleanData.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertEquals(1, 
events.length); Boolean eventData = events[0].getData().toObject(TypeReference.createInstance(Boolean.class)); assertNotNull(eventData); assertTrue(eventData); } @Test public void consumeCustomEventWithStringData() throws IOException { String jsonData = getTestPayloadFromFile("CustomEventWithStringData.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertEquals(1, events.length); String eventData = events[0].getData().toObject(String.class); assertNotNull(eventData); assertEquals("stringdata", eventData); } @Test public void consumeCustomEventWithPolymorphicData() throws IOException { String jsonData = getTestPayloadFromFile("CustomEventWithPolymorphicData.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertEquals(2, events.length); ContosoItemSentEventData eventData0 = events[0].getData().toObject(ContosoItemSentEventData.class); ContosoItemSentEventData eventData1 = events[1].getData().toObject(ContosoItemSentEventData.class); assertNotNull(eventData0); assertNotNull(eventData1); assertInstanceOf(DroneShippingInfo.class, eventData0.getShippingInfo()); assertInstanceOf(RocketShippingInfo.class, eventData1.getShippingInfo()); } @Test public void consumeMultipleEventsInSameBatch() throws IOException { String jsonData = getTestPayloadFromFile("MultipleEventsInSameBatch.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertEquals(4, events.length); assertInstanceOf(StorageBlobCreatedEventData.class, toSystemEventData(events[0])); assertInstanceOf(StorageBlobDeletedEventData.class, toSystemEventData(events[1])); assertInstanceOf(StorageBlobDeletedEventData.class, toSystemEventData(events[2])); assertInstanceOf(ServiceBusDeadletterMessagesAvailableWithNoListenersEventData.class, toSystemEventData(events[3])); StorageBlobDeletedEventData 
eventData = (StorageBlobDeletedEventData) toSystemEventData(events[2]); assertEquals("https: } @Test public void consumeAppConfigurationKeyValueDeletedEvent() throws IOException { String jsonData = getTestPayloadFromFile("AppConfigurationKeyValueDeleted.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertInstanceOf(AppConfigurationKeyValueDeletedEventData.class, toSystemEventData(events[0])); AppConfigurationKeyValueDeletedEventData eventData = (AppConfigurationKeyValueDeletedEventData) toSystemEventData(events[0]); assertEquals("key1", eventData.getKey()); } @Test public void consumeAppConfigurationKeyValueModifiedEvent() throws IOException { String jsonData = getTestPayloadFromFile("AppConfigurationKeyValueModified.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertInstanceOf(AppConfigurationKeyValueModifiedEventData.class, toSystemEventData(events[0])); AppConfigurationKeyValueModifiedEventData eventData = (AppConfigurationKeyValueModifiedEventData) toSystemEventData(events[0]); assertEquals("key1", eventData.getKey()); } @Test public void consumeContainerRegistryImagePushedEvent() throws IOException { String jsonData = getTestPayloadFromFile("ContainerRegistryImagePushedEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertInstanceOf(ContainerRegistryImagePushedEventData.class, toSystemEventData(events[0])); ContainerRegistryImagePushedEventData eventData = (ContainerRegistryImagePushedEventData) toSystemEventData(events[0]); assertEquals("127.0.0.1", eventData.getRequest().getAddr()); } @Test public void consumeContainerRegistryImageDeletedEvent() throws IOException { String jsonData = getTestPayloadFromFile("ContainerRegistryImageDeletedEvent.json"); EventGridEvent[] events = 
EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertInstanceOf(ContainerRegistryImageDeletedEventData.class, toSystemEventData(events[0])); ContainerRegistryImageDeletedEventData eventData = (ContainerRegistryImageDeletedEventData) toSystemEventData(events[0]); assertEquals("testactor", eventData.getActor().getName()); } @Test public void consumeContainerRegistryChartDeletedEvent() throws IOException { String jsonData = getTestPayloadFromFile("ContainerRegistryChartDeletedEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertInstanceOf(ContainerRegistryChartDeletedEventData.class, toSystemEventData(events[0])); ContainerRegistryChartDeletedEventData eventData = (ContainerRegistryChartDeletedEventData) toSystemEventData(events[0]); assertEquals("mediatype1", eventData.getTarget().getMediaType()); } @Test public void consumeContainerRegistryChartPushedEvent() throws IOException { String jsonData = getTestPayloadFromFile("ContainerRegistryChartPushedEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertInstanceOf(ContainerRegistryChartPushedEventData.class, toSystemEventData(events[0])); ContainerRegistryChartPushedEventData eventData = (ContainerRegistryChartPushedEventData) toSystemEventData(events[0]); assertEquals("mediatype1", eventData.getTarget().getMediaType()); } @Test public void consumeIoTHubDeviceCreatedEvent() throws IOException { String jsonData = getTestPayloadFromFile("IoTHubDeviceCreatedEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertInstanceOf(IotHubDeviceCreatedEventData.class, toSystemEventData(events[0])); IotHubDeviceCreatedEventData eventData = (IotHubDeviceCreatedEventData) toSystemEventData(events[0]); assertEquals("enabled", 
eventData.getTwin().getStatus()); } @Test public void consumeIoTHubDeviceDeletedEvent() throws IOException { String jsonData = getTestPayloadFromFile("IoTHubDeviceDeletedEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertInstanceOf(IotHubDeviceDeletedEventData.class, toSystemEventData(events[0])); IotHubDeviceDeletedEventData eventData = (IotHubDeviceDeletedEventData) toSystemEventData(events[0]); assertEquals("AAAAAAAAAAE=", eventData.getTwin().getEtag()); } @Test public void consumeIoTHubDeviceConnectedEvent() throws IOException { String jsonData = getTestPayloadFromFile("IoTHubDeviceConnectedEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertInstanceOf(IotHubDeviceConnectedEventData.class, toSystemEventData(events[0])); IotHubDeviceConnectedEventData eventData = (IotHubDeviceConnectedEventData) toSystemEventData(events[0]); assertEquals("EGTESTHUB1", eventData.getHubName()); } @Test public void consumeIoTHubDeviceDisconnectedEvent() throws IOException { String jsonData = getTestPayloadFromFile("IoTHubDeviceDisconnectedEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertInstanceOf(IotHubDeviceDisconnectedEventData.class, toSystemEventData(events[0])); IotHubDeviceDisconnectedEventData eventData = (IotHubDeviceDisconnectedEventData) toSystemEventData(events[0]); assertEquals("000000000000000001D4132452F67CE200000002000000000000000000000002", eventData.getDeviceConnectionStateEventInfo().getSequenceNumber()); } @Test public void consumeIoTHubDeviceTelemetryEvent() throws IOException { String jsonData = getTestPayloadFromFile("IoTHubDeviceTelemetryEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); 
assertInstanceOf(IotHubDeviceTelemetryEventData.class, toSystemEventData(events[0])); IotHubDeviceTelemetryEventData eventData = (IotHubDeviceTelemetryEventData) toSystemEventData(events[0]); assertEquals("Active", eventData.getProperties().get("Status")); } @Test public void consumeEventGridSubscriptionValidationEvent() throws IOException { String jsonData = getTestPayloadFromFile("EventGridSubscriptionValidationEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertInstanceOf(SubscriptionValidationEventData.class, toSystemEventData(events[0])); SubscriptionValidationEventData eventData = (SubscriptionValidationEventData) toSystemEventData(events[0]); assertEquals("512d38b6-c7b8-40c8-89fe-f46f9e9622b6", eventData.getValidationCode()); } @Test public void consumeEventGridSubscriptionDeletedEvent() throws IOException { String jsonData = getTestPayloadFromFile("EventGridSubscriptionDeletedEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertInstanceOf(SubscriptionDeletedEventData.class, toSystemEventData(events[0])); SubscriptionDeletedEventData eventData = (SubscriptionDeletedEventData) toSystemEventData(events[0]); assertEquals("/subscriptions/id/resourceGroups/rg/providers/Microsoft.EventGrid/topics/topic1/providers/Microsoft.EventGrid/eventSubscriptions/eventsubscription1", eventData.getEventSubscriptionId()); } @Test public void consumeEventHubCaptureFileCreatedEvent() throws IOException { String jsonData = getTestPayloadFromFile("EventHubCaptureFileCreatedEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertInstanceOf(EventHubCaptureFileCreatedEventData.class, toSystemEventData(events[0])); EventHubCaptureFileCreatedEventData eventData = (EventHubCaptureFileCreatedEventData) toSystemEventData(events[0]); 
assertEquals("AzureBlockBlob", eventData.getFileType()); } @Test public void consumeMapsGeoFenceEnteredEvent() throws IOException { String jsonData = getTestPayloadFromFile("MapsGeofenceEnteredEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertInstanceOf(MapsGeofenceEnteredEventData.class, toSystemEventData(events[0])); MapsGeofenceEnteredEventData eventData = (MapsGeofenceEnteredEventData) toSystemEventData(events[0]); assertEquals(true, eventData.isEventPublished()); } @Test public void consumeMapsGeoFenceExitedEvent() throws IOException { String jsonData = getTestPayloadFromFile("MapsGeofenceExitedEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertInstanceOf(MapsGeofenceExitedEventData.class, toSystemEventData(events[0])); MapsGeofenceExitedEventData eventData = (MapsGeofenceExitedEventData) toSystemEventData(events[0]); assertEquals(true, eventData.isEventPublished()); } @Test public void consumeMapsGeoFenceResultEvent() throws IOException { String jsonData = getTestPayloadFromFile("MapsGeofenceResultEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertInstanceOf(MapsGeofenceResultEventData.class, toSystemEventData(events[0])); MapsGeofenceResultEventData eventData = (MapsGeofenceResultEventData) toSystemEventData(events[0]); assertEquals(true, eventData.isEventPublished()); } @Test public void consumeMediaJobCanceledEvent() throws IOException { String jsonData = getTestPayloadFromFile("MediaJobCanceledEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertInstanceOf(MediaJobCanceledEventData.class, toSystemEventData(events[0])); MediaJobCanceledEventData eventData = (MediaJobCanceledEventData) toSystemEventData(events[0]); 
assertEquals(MediaJobState.CANCELING, eventData.getPreviousState()); assertEquals(MediaJobState.CANCELED, eventData.getState()); assertEquals(1, eventData.getOutputs().size()); assertInstanceOf(MediaJobOutputAsset.class, eventData.getOutputs().get(0)); MediaJobOutputAsset outputAsset = (MediaJobOutputAsset) eventData.getOutputs().get(0); assertEquals(MediaJobState.CANCELED, outputAsset.getState()); assertNull(outputAsset.getError()); assertNotEquals(100, outputAsset.getProgress()); assertEquals("output-7a8215f9-0f8d-48a6-82ed-1ead772bc221", outputAsset.getAssetName()); } @Test public void consumeMediaJobCancelingEvent() throws IOException { String jsonData = getTestPayloadFromFile("MediaJobCancelingEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertInstanceOf(MediaJobCancelingEventData.class, toSystemEventData(events[0])); MediaJobCancelingEventData eventData = (MediaJobCancelingEventData) toSystemEventData(events[0]); assertEquals(MediaJobState.PROCESSING, eventData.getPreviousState()); assertEquals(MediaJobState.CANCELING, eventData.getState()); } @Test public void consumeMediaJobProcessingEvent() throws IOException { String jsonData = getTestPayloadFromFile("MediaJobProcessingEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertInstanceOf(MediaJobProcessingEventData.class, toSystemEventData(events[0])); MediaJobProcessingEventData eventData = (MediaJobProcessingEventData) toSystemEventData(events[0]); assertEquals(MediaJobState.SCHEDULED, eventData.getPreviousState()); assertEquals(MediaJobState.PROCESSING, eventData.getState()); } @Test public void consumeMediaJobFinishedEvent() throws IOException { String jsonData = getTestPayloadFromFile("MediaJobFinishedEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); 
assertInstanceOf(MediaJobFinishedEventData.class, toSystemEventData(events[0])); MediaJobFinishedEventData eventData = (MediaJobFinishedEventData) toSystemEventData(events[0]); assertEquals(MediaJobState.PROCESSING, eventData.getPreviousState()); assertEquals(MediaJobState.FINISHED, eventData.getState()); assertEquals(1, eventData.getOutputs().size()); assertInstanceOf(MediaJobOutputAsset.class, eventData.getOutputs().get(0)); MediaJobOutputAsset outputAsset = (MediaJobOutputAsset) eventData.getOutputs().get(0); assertEquals(MediaJobState.FINISHED, outputAsset.getState()); assertNull(outputAsset.getError()); assertEquals(100, outputAsset.getProgress()); assertEquals("output-298338bb-f8d1-4d0f-9fde-544e0ac4d983", outputAsset.getAssetName()); } @Test public void consumeMediaJobErroredEvent() throws IOException { String jsonData = getTestPayloadFromFile("MediaJobErroredEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertInstanceOf(MediaJobErroredEventData.class, toSystemEventData(events[0])); MediaJobErroredEventData eventData = (MediaJobErroredEventData) toSystemEventData(events[0]); assertEquals(MediaJobState.PROCESSING, eventData.getPreviousState()); assertEquals(MediaJobState.ERROR, eventData.getState()); assertEquals(1, eventData.getOutputs().size()); assertInstanceOf(MediaJobOutputAsset.class, eventData.getOutputs().get(0)); assertEquals(MediaJobState.ERROR, eventData.getOutputs().get(0).getState()); assertNotNull(eventData.getOutputs().get(0).getError()); assertEquals(MediaJobErrorCategory.SERVICE, eventData.getOutputs().get(0).getError().getCategory()); assertEquals(MediaJobErrorCode.SERVICE_ERROR, eventData.getOutputs().get(0).getError().getCode()); } @Test public void consumeMediaJobOutputStateChangeEvent() throws IOException { String jsonData = getTestPayloadFromFile("MediaJobOutputStateChangeEvent.json"); EventGridEvent[] events = 
// (continuation of consumeMediaJobOutputStateChangeEvent — completes the assignment begun in the previous chunk)
EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
assertNotNull(events);
assertInstanceOf(MediaJobOutputStateChangeEventData.class, toSystemEventData(events[0]));
MediaJobOutputStateChangeEventData eventData = (MediaJobOutputStateChangeEventData) toSystemEventData(events[0]);
assertEquals(MediaJobState.SCHEDULED, eventData.getPreviousState());
assertEquals(MediaJobState.PROCESSING, eventData.getOutput().getState());
assertInstanceOf(MediaJobOutputAsset.class, eventData.getOutput());
MediaJobOutputAsset outputAsset = (MediaJobOutputAsset) eventData.getOutput();
assertEquals("output-2ac2fe75-6557-4de5-ab25-5713b74a6901", outputAsset.getAssetName());
}

// Deserializes a "job scheduled" payload: QUEUED -> SCHEDULED.
@Test
public void consumeMediaJobScheduledEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaJobScheduledEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(MediaJobScheduledEventData.class, toSystemEventData(events[0]));
    MediaJobScheduledEventData eventData = (MediaJobScheduledEventData) toSystemEventData(events[0]);
    assertEquals(MediaJobState.QUEUED, eventData.getPreviousState());
    assertEquals(MediaJobState.SCHEDULED, eventData.getState());
}

// Deserializes a "job output canceled" payload: CANCELING -> CANCELED on the output.
@Test
public void consumeMediaJobOutputCanceledEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaJobOutputCanceledEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(MediaJobOutputCanceledEventData.class, toSystemEventData(events[0]));
    MediaJobOutputCanceledEventData eventData = (MediaJobOutputCanceledEventData) toSystemEventData(events[0]);
    assertEquals(MediaJobState.CANCELING, eventData.getPreviousState());
    assertEquals(MediaJobState.CANCELED, eventData.getOutput().getState());
    assertInstanceOf(MediaJobOutputAsset.class, eventData.getOutput());
}

// Deserializes a "job output canceling" payload (body continues in the next chunk).
@Test
public void consumeMediaJobOutputCancelingEvent() throws IOException {
// (continuation of consumeMediaJobOutputCancelingEvent) PROCESSING -> CANCELING on the output.
String jsonData = getTestPayloadFromFile("MediaJobOutputCancelingEvent.json");
EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
assertNotNull(events);
assertInstanceOf(MediaJobOutputCancelingEventData.class, toSystemEventData(events[0]));
MediaJobOutputCancelingEventData eventData = (MediaJobOutputCancelingEventData) toSystemEventData(events[0]);
assertEquals(MediaJobState.PROCESSING, eventData.getPreviousState());
assertEquals(MediaJobState.CANCELING, eventData.getOutput().getState());
assertInstanceOf(MediaJobOutputAsset.class, eventData.getOutput());
}

// Deserializes a "job output errored" payload: PROCESSING -> ERROR with a service-category error.
@Test
public void consumeMediaJobOutputErroredEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaJobOutputErroredEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(MediaJobOutputErroredEventData.class, toSystemEventData(events[0]));
    MediaJobOutputErroredEventData eventData = (MediaJobOutputErroredEventData) toSystemEventData(events[0]);
    assertEquals(MediaJobState.PROCESSING, eventData.getPreviousState());
    assertEquals(MediaJobState.ERROR, eventData.getOutput().getState());
    assertInstanceOf(MediaJobOutputAsset.class, eventData.getOutput());
    assertNotNull(eventData.getOutput().getError());
    assertEquals(MediaJobErrorCategory.SERVICE, eventData.getOutput().getError().getCategory());
    assertEquals(MediaJobErrorCode.SERVICE_ERROR, eventData.getOutput().getError().getCode());
}

// Deserializes a "job output finished" payload (assertions continue in the next chunk).
@Test
public void consumeMediaJobOutputFinishedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaJobOutputFinishedEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(MediaJobOutputFinishedEventData.class, toSystemEventData(events[0]));
    MediaJobOutputFinishedEventData eventData = (MediaJobOutputFinishedEventData) toSystemEventData(events[0]);
// (continuation of consumeMediaJobOutputFinishedEvent) PROCESSING -> FINISHED at 100% progress.
assertEquals(MediaJobState.PROCESSING, eventData.getPreviousState());
assertEquals(MediaJobState.FINISHED, eventData.getOutput().getState());
assertInstanceOf(MediaJobOutputAsset.class, eventData.getOutput());
assertEquals(100, eventData.getOutput().getProgress());
MediaJobOutputAsset outputAsset = (MediaJobOutputAsset) eventData.getOutput();
assertEquals("output-2ac2fe75-6557-4de5-ab25-5713b74a6901", outputAsset.getAssetName());
}

// Deserializes a "job output processing" payload: SCHEDULED -> PROCESSING on the output.
@Test
public void consumeMediaJobOutputProcessingEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaJobOutputProcessingEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(MediaJobOutputProcessingEventData.class, toSystemEventData(events[0]));
    MediaJobOutputProcessingEventData eventData = (MediaJobOutputProcessingEventData) toSystemEventData(events[0]);
    assertEquals(MediaJobState.SCHEDULED, eventData.getPreviousState());
    assertEquals(MediaJobState.PROCESSING, eventData.getOutput().getState());
    assertInstanceOf(MediaJobOutputAsset.class, eventData.getOutput());
}

// Deserializes a "job output scheduled" payload: QUEUED -> SCHEDULED on the output.
@Test
public void consumeMediaJobOutputScheduledEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaJobOutputScheduledEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(MediaJobOutputScheduledEventData.class, toSystemEventData(events[0]));
    MediaJobOutputScheduledEventData eventData = (MediaJobOutputScheduledEventData) toSystemEventData(events[0]);
    assertEquals(MediaJobState.QUEUED, eventData.getPreviousState());
    assertEquals(MediaJobState.SCHEDULED, eventData.getOutput().getState());
    assertInstanceOf(MediaJobOutputAsset.class, eventData.getOutput());
}

// Deserializes a "job output progress" payload (assertions continue in the next chunk).
@Test
public void consumeMediaJobOutputProgressEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaJobOutputProgressEvent.json");
    EventGridEvent[] events =
// (continuation of consumeMediaJobOutputProgressEvent) label and job correlation data round-trip.
EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
assertNotNull(events);
assertInstanceOf(MediaJobOutputProgressEventData.class, toSystemEventData(events[0]));
MediaJobOutputProgressEventData eventData = (MediaJobOutputProgressEventData) toSystemEventData(events[0]);
assertEquals("TestLabel", eventData.getLabel());
assertTrue(eventData.getJobCorrelationData().containsKey("Field1"));
assertEquals("test1", eventData.getJobCorrelationData().get("Field1"));
assertTrue(eventData.getJobCorrelationData().containsKey("Field2"));
assertEquals("test2", eventData.getJobCorrelationData().get("Field2"));
}

// Deserializes a "job state change" payload: SCHEDULED -> PROCESSING.
@Test
public void consumeMediaJobStateChangeEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaJobStateChangeEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(MediaJobStateChangeEventData.class, toSystemEventData(events[0]));
    MediaJobStateChangeEventData eventData = (MediaJobStateChangeEventData) toSystemEventData(events[0]);
    assertEquals(MediaJobState.SCHEDULED, eventData.getPreviousState());
    assertEquals(MediaJobState.PROCESSING, eventData.getState());
}

// Deserializes a "live event encoder connected" payload.
@Test
public void consumeMediaLiveEventEncoderConnectedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaLiveEventEncoderConnectedEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(MediaLiveEventEncoderConnectedEventData.class, toSystemEventData(events[0]));
    MediaLiveEventEncoderConnectedEventData eventData = (MediaLiveEventEncoderConnectedEventData) toSystemEventData(events[0]);
    // NOTE(review): mangled source — the "rtmp://..." ingest-URL literal below was truncated at "//"
    // during extraction (everything to the original end-of-line is missing). Preserved verbatim;
    // restore the full literal from VCS history before compiling.
    assertEquals("rtmp: assertEquals("Mystream1", eventData.getStreamId()); assertEquals("<ip address>", eventData.getEncoderIp()); assertEquals("3557", eventData.getEncoderPort()); } @Test public void consumeMediaLiveEventConnectionRejectedEvent() throws IOException { String jsonData =
// (continuation of consumeMediaLiveEventConnectionRejectedEvent)
getTestPayloadFromFile("MediaLiveEventConnectionRejectedEvent.json");
EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
assertNotNull(events);
assertInstanceOf(MediaLiveEventConnectionRejectedEventData.class, toSystemEventData(events[0]));
MediaLiveEventConnectionRejectedEventData eventData = (MediaLiveEventConnectionRejectedEventData) toSystemEventData(events[0]);
assertEquals("Mystream1", eventData.getStreamId());
}

// Deserializes a "live event encoder disconnected" payload.
@Test
public void consumeMediaLiveEventEncoderDisconnectedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaLiveEventEncoderDisconnectedEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(MediaLiveEventEncoderDisconnectedEventData.class, toSystemEventData(events[0]));
    MediaLiveEventEncoderDisconnectedEventData eventData = (MediaLiveEventEncoderDisconnectedEventData) toSystemEventData(events[0]);
    // NOTE(review): mangled source — two "rtmp://..." literals below (this method and
    // consumeMediaLiveEventIncomingStreamReceivedEvent) were truncated at "//" during extraction.
    // Preserved verbatim; restore the full literals from VCS history.
    assertEquals("rtmp: assertEquals("Mystream1", eventData.getStreamId()); assertEquals("<ip address>", eventData.getEncoderIp()); assertEquals("3557", eventData.getEncoderPort()); } @Test public void consumeMediaLiveEventIncomingStreamReceivedEvent() throws IOException { String jsonData = getTestPayloadFromFile("MediaLiveEventIncomingStreamReceivedEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertInstanceOf(MediaLiveEventIncomingStreamReceivedEventData.class, toSystemEventData(events[0])); MediaLiveEventIncomingStreamReceivedEventData eventData = (MediaLiveEventIncomingStreamReceivedEventData) toSystemEventData(events[0]); assertEquals("rtmp: assertEquals("<ip address>", eventData.getEncoderIp()); assertEquals("3557", eventData.getEncoderPort()); assertEquals("audio", eventData.getTrackType()); assertEquals("audio_160000", eventData.getTrackName()); assertEquals("66", eventData.getTimestamp());
// (continuation of consumeMediaLiveEventIncomingStreamReceivedEvent)
assertEquals("1950", eventData.getDuration());
assertEquals("1000", eventData.getTimescale());
}

// Deserializes an "incoming streams out of sync" payload and checks the min/max timestamp diagnostics.
@Test
public void consumeMediaLiveEventIncomingStreamsOutOfSyncEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaLiveEventIncomingStreamsOutOfSyncEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(MediaLiveEventIncomingStreamsOutOfSyncEventData.class, toSystemEventData(events[0]));
    MediaLiveEventIncomingStreamsOutOfSyncEventData eventData = (MediaLiveEventIncomingStreamsOutOfSyncEventData) toSystemEventData(events[0]);
    assertEquals("10999", eventData.getMinLastTimestamp());
    assertEquals("video", eventData.getTypeOfStreamWithMinLastTimestamp());
    assertEquals("100999", eventData.getMaxLastTimestamp());
    assertEquals("audio", eventData.getTypeOfStreamWithMaxLastTimestamp());
    assertEquals("1000", eventData.getTimescaleOfMinLastTimestamp());
    assertEquals("1000", eventData.getTimescaleOfMaxLastTimestamp());
}

// Deserializes an "incoming video streams out of sync" payload.
@Test
public void consumeMediaLiveEventIncomingVideoStreamsOutOfSyncEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaLiveEventIncomingVideoStreamsOutOfSyncEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(MediaLiveEventIncomingVideoStreamsOutOfSyncEventData.class, toSystemEventData(events[0]));
    MediaLiveEventIncomingVideoStreamsOutOfSyncEventData eventData = (MediaLiveEventIncomingVideoStreamsOutOfSyncEventData) toSystemEventData(events[0]);
    assertEquals("10999", eventData.getFirstTimestamp());
    assertEquals("2000", eventData.getFirstDuration());
    assertEquals("100999", eventData.getSecondTimestamp());
    assertEquals("2000", eventData.getSecondDuration());
    assertEquals("1000", eventData.getTimescale());
}

// Deserializes an "incoming data chunk dropped" payload (body continues in the next chunk).
@Test
public void consumeMediaLiveEventIncomingDataChunkDroppedEvent() throws IOException {
    String jsonData =
// (continuation of consumeMediaLiveEventIncomingDataChunkDroppedEvent)
getTestPayloadFromFile("MediaLiveEventIncomingDataChunkDroppedEvent.json");
EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
assertNotNull(events);
assertInstanceOf(MediaLiveEventIncomingDataChunkDroppedEventData.class, toSystemEventData(events[0]));
MediaLiveEventIncomingDataChunkDroppedEventData eventData = (MediaLiveEventIncomingDataChunkDroppedEventData) toSystemEventData(events[0]);
assertEquals("8999", eventData.getTimestamp());
assertEquals("video", eventData.getTrackType());
assertEquals("video1", eventData.getTrackName());
assertEquals("1000", eventData.getTimescale());
assertEquals("FragmentDrop_OverlapTimestamp", eventData.getResultCode());
}

// Deserializes "ingest heartbeat" payloads; the inline JSON variant verifies that a non-numeric
// ingestDriftValue ("n/a") is surfaced as null rather than a parse failure.
@Test
public void consumeMediaLiveEventIngestHeartbeatEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaLiveEventIngestHeartbeatEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(MediaLiveEventIngestHeartbeatEventData.class, toSystemEventData(events[0]));
    MediaLiveEventIngestHeartbeatEventData eventData = (MediaLiveEventIngestHeartbeatEventData) toSystemEventData(events[0]);
    assertEquals("video", eventData.getTrackType());
    assertEquals("video", eventData.getTrackName());
    assertEquals("11999", eventData.getLastTimestamp());
    assertEquals("1000", eventData.getTimescale());
    assertTrue(eventData.isUnexpectedBitrate());
    assertEquals("Running", eventData.getState());
    assertFalse(eventData.isHealthy());
    assertEquals(0, eventData.getIngestDriftValue());
    assertEquals(OffsetDateTime.parse("2021-05-14T23:50:00.00Z"), eventData.getLastFragmentArrivalTime());
    // Second payload: same event, but ingestDriftValue is "n/a" — expected to map to null.
    jsonData = "[{ \"topic\": \"/subscriptions/{subscription id}/resourceGroups/{resource group}/providers/Microsoft.Media/mediaservices/{account name}\", \"subject\": \"liveEvent/liveevent-ec9d26a8\", \"eventType\": \"Microsoft.Media.LiveEventIngestHeartbeat\", \"eventTime\": \"2018-10-12T15:52:37.3710102\", \"id\": \"d84727e2-d9c0-4a21-a66b-8d23f06b3e06\", \"data\": { \"trackType\": \"video\", \"trackName\": \"video\", \"bitrate\": 2500000, \"incomingBitrate\": 500726, \"lastTimestamp\": \"11999\", \"timescale\": \"1000\", \"overlapCount\": 0, \"discontinuityCount\": 0, \"nonincreasingCount\": 0, \"unexpectedBitrate\": true, \"state\": \"Running\", \"healthy\": false, \"lastFragmentArrivalTime\": \"2021-05-14T23:50:00.00\", \"ingestDriftValue\": \"n/a\" }, \"dataVersion\": \"1.0\", \"metadataVersion\": \"1\"}]";
    events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(MediaLiveEventIngestHeartbeatEventData.class, toSystemEventData(events[0]));
    eventData = (MediaLiveEventIngestHeartbeatEventData) toSystemEventData(events[0]);
    assertNull(eventData.getIngestDriftValue());
}

// Deserializes a "track discontinuity detected" payload.
@Test
public void consumeMediaLiveEventTrackDiscontinuityDetectedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaLiveEventTrackDiscontinuityDetectedEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(MediaLiveEventTrackDiscontinuityDetectedEventData.class, toSystemEventData(events[0]));
    MediaLiveEventTrackDiscontinuityDetectedEventData eventData = (MediaLiveEventTrackDiscontinuityDetectedEventData) toSystemEventData(events[0]);
    assertEquals("video", eventData.getTrackType());
    assertEquals("video", eventData.getTrackName());
    assertEquals("10999", eventData.getPreviousTimestamp());
    assertEquals("14999", eventData.getNewTimestamp());
    assertEquals("1000", eventData.getTimescale());
    assertEquals("4000", eventData.getDiscontinuityGap());
}

// Deserializes "channel archive heartbeat" payloads; a non-numeric channelLatencyMs ("n/a")
// is expected to map to a null Duration.
@Test
public void consumeMediaLiveEventChannelArchiveHeartbeatEvent() throws IOException {
    String jsonData = "[{ \"topic\": \"/subscriptions/{subscription id}/resourceGroups/{resource group}/providers/Microsoft.Media/mediaservices/{account name}\", \"subject\": \"liveEvent/mle1\", \"eventType\": \"Microsoft.Media.LiveEventChannelArchiveHeartbeat\", \"eventTime\": \"2021-05-14T23:50:00.324\", \"id\": \"7f450938-491f-41e1-b06f-c6cd3965d786\", \"data\": { \"channelLatencyMs\": \"10\", \"latencyResultCode\": \"S_OK\"}, \"dataVersion\": \"1.0\", \"metadataVersion\": \"1\"}]";
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(MediaLiveEventChannelArchiveHeartbeatEventData.class, toSystemEventData(events[0]));
    MediaLiveEventChannelArchiveHeartbeatEventData eventData = (MediaLiveEventChannelArchiveHeartbeatEventData) toSystemEventData(events[0]);
    assertEquals(Duration.ofMillis(10), eventData.getChannelLatency());
    assertEquals("S_OK", eventData.getLatencyResultCode());
    // Second payload: "n/a" latency — channel latency should be null.
    jsonData = "[{ \"topic\": \"/subscriptions/{subscription id}/resourceGroups/{resource group}/providers/Microsoft.Media/mediaservices/{account name}\", \"subject\": \"liveEvent/mle1\", \"eventType\": \"Microsoft.Media.LiveEventChannelArchiveHeartbeat\", \"eventTime\": \"2021-05-14T23:50:00.324\", \"id\": \"7f450938-491f-41e1-b06f-c6cd3965d786\", \"data\": { \"channelLatencyMs\": \"n/a\", \"latencyResultCode\": \"S_OK\"}, \"dataVersion\": \"1.0\", \"metadataVersion\": \"1\"}]";
    events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(MediaLiveEventChannelArchiveHeartbeatEventData.class, toSystemEventData(events[0]));
    eventData = (MediaLiveEventChannelArchiveHeartbeatEventData) toSystemEventData(events[0]);
    assertNull(eventData.getChannelLatency());
    assertEquals("S_OK", eventData.getLatencyResultCode());
}

// Deserializes an ARM "resource write failure" payload (assertion continues in the next chunk).
@Test
public void consumeResourceWriteFailureEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("ResourceWriteFailureEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(ResourceWriteFailureEventData.class,
// (continuation of consumeResourceWriteFailureEvent)
toSystemEventData(events[0]));
ResourceWriteFailureEventData eventData = (ResourceWriteFailureEventData) toSystemEventData(events[0]);
assertEquals("72f988bf-86f1-41af-91ab-2d7cd011db47", eventData.getTenantId());
}

// Deserializes an ARM "resource write cancel" payload and checks the tenant id.
@Test
public void consumeResourceWriteCancelEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("ResourceWriteCancelEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(ResourceWriteCancelEventData.class, toSystemEventData(events[0]));
    ResourceWriteCancelEventData eventData = (ResourceWriteCancelEventData) toSystemEventData(events[0]);
    assertEquals("72f988bf-86f1-41af-91ab-2d7cd011db47", eventData.getTenantId());
}

// Deserializes an ARM "resource delete success" payload.
@Test
public void consumeResourceDeleteSuccessEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("ResourceDeleteSuccessEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(ResourceDeleteSuccessEventData.class, toSystemEventData(events[0]));
    ResourceDeleteSuccessEventData eventData = (ResourceDeleteSuccessEventData) toSystemEventData(events[0]);
    assertEquals("72f988bf-86f1-41af-91ab-2d7cd011db47", eventData.getTenantId());
}

// Deserializes an ARM "resource delete failure" payload.
@Test
public void consumeResourceDeleteFailureEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("ResourceDeleteFailureEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(ResourceDeleteFailureEventData.class, toSystemEventData(events[0]));
    ResourceDeleteFailureEventData eventData = (ResourceDeleteFailureEventData) toSystemEventData(events[0]);
    assertEquals("72f988bf-86f1-41af-91ab-2d7cd011db47", eventData.getTenantId());
}

// Deserializes an ARM "resource delete cancel" payload (body continues in the next chunk).
@Test
public void consumeResourceDeleteCancelEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("ResourceDeleteCancelEvent.json");
    EventGridEvent[]
// (continuation of consumeResourceDeleteCancelEvent)
events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
assertNotNull(events);
assertInstanceOf(ResourceDeleteCancelEventData.class, toSystemEventData(events[0]));
ResourceDeleteCancelEventData eventData = (ResourceDeleteCancelEventData) toSystemEventData(events[0]);
assertEquals("72f988bf-86f1-41af-91ab-2d7cd011db47", eventData.getTenantId());
}

// Deserializes an ARM "resource action success" payload.
@Test
public void consumeResourceActionSuccessEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("ResourceActionSuccessEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(ResourceActionSuccessEventData.class, toSystemEventData(events[0]));
    ResourceActionSuccessEventData eventData = (ResourceActionSuccessEventData) toSystemEventData(events[0]);
    assertEquals("72f988bf-86f1-41af-91ab-2d7cd011db47", eventData.getTenantId());
}

// Deserializes an ARM "resource action failure" payload.
@Test
public void consumeResourceActionFailureEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("ResourceActionFailureEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(ResourceActionFailureEventData.class, toSystemEventData(events[0]));
    ResourceActionFailureEventData eventData = (ResourceActionFailureEventData) toSystemEventData(events[0]);
    assertEquals("72f988bf-86f1-41af-91ab-2d7cd011db47", eventData.getTenantId());
}

// Deserializes an ARM "resource action cancel" payload.
@Test
public void consumeResourceActionCancelEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("ResourceActionCancelEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(ResourceActionCancelEventData.class, toSystemEventData(events[0]));
    ResourceActionCancelEventData eventData = (ResourceActionCancelEventData) toSystemEventData(events[0]);
    assertEquals("72f988bf-86f1-41af-91ab-2d7cd011db47", eventData.getTenantId());
}

// (annotation for the Service Bus test whose declaration begins in the next chunk)
@Test
// Checks the "active messages available with no listeners" Service Bus system event.
public void consumeServiceBusActiveMessagesAvailableWithNoListenersEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("ServiceBusActiveMessagesAvailableWithNoListenersEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(ServiceBusActiveMessagesAvailableWithNoListenersEventData.class, toSystemEventData(events[0]));
    ServiceBusActiveMessagesAvailableWithNoListenersEventData eventData = (ServiceBusActiveMessagesAvailableWithNoListenersEventData) toSystemEventData(events[0]);
    assertEquals("testns1", eventData.getNamespaceName());
}

// Checks the "dead-letter messages available with no listeners" Service Bus system event.
@Test
public void consumeServiceBusDeadletterMessagesAvailableWithNoListenersEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("ServiceBusDeadletterMessagesAvailableWithNoListenersEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(ServiceBusDeadletterMessagesAvailableWithNoListenersEventData.class, toSystemEventData(events[0]));
    ServiceBusDeadletterMessagesAvailableWithNoListenersEventData eventData = (ServiceBusDeadletterMessagesAvailableWithNoListenersEventData) toSystemEventData(events[0]);
    assertEquals("testns1", eventData.getNamespaceName());
}

// Deserializes a "storage blob created" payload.
@Test
public void consumeStorageBlobCreatedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("StorageBlobCreatedEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(StorageBlobCreatedEventData.class, toSystemEventData(events[0]));
    StorageBlobCreatedEventData eventData = (StorageBlobCreatedEventData) toSystemEventData(events[0]);
    // NOTE(review): mangled source — an "https://..." blob-URL literal below was truncated at "//"
    // during extraction; the remaining text (into consumeStorageBlobDeletedEvent) is preserved
    // verbatim. Restore the full literal from VCS history.
    assertEquals("https: } @Test public void consumeStorageBlobDeletedEvent() throws IOException { String jsonData = getTestPayloadFromFile("StorageBlobDeletedEvent.json"); EventGridEvent[] events =
// (continuation of consumeStorageBlobDeletedEvent)
EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
assertNotNull(events);
assertInstanceOf(StorageBlobDeletedEventData.class, toSystemEventData(events[0]));
StorageBlobDeletedEventData eventData = (StorageBlobDeletedEventData) toSystemEventData(events[0]);
// NOTE(review): mangled source — every "https://..." URL below, including those inside the inline
// JSON payloads of consumeCloudEventStorageBlobRenamedEvent and consumeStorageDirectoryCreatedEvent,
// was truncated at "//" during extraction. Preserved verbatim; restore from VCS history.
assertEquals("https: } @Test public void consumeCloudEventStorageBlobRenamedEvent() { String jsonData = "[ { \"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Storage/storageAccounts/myaccount\", \"subject\": \"/blobServices/default/containers/testcontainer/blobs/testfile.txt\", \"type\": \"Microsoft.Storage.BlobRenamed\", \"time\": \"2017-08-16T01:57:26.005121Z\", \"id\": \"602a88ef-0001-00e6-1233-1646070610ea\", \"data\": { \"api\": \"RenameFile\", \"clientRequestId\": \"799304a4-bbc5-45b6-9849-ec2c66be800a\", \"requestId\": \"602a88ef-0001-00e6-1233-164607000000\", \"eTag\": \"0x8D4E44A24ABE7F1\", \"destinationUrl\": \"https: CloudEvent[] events = CloudEvent.fromString(jsonData).toArray(new CloudEvent[0]); assertNotNull(events); assertInstanceOf(StorageBlobRenamedEventData.class, toSystemEventData(events[0])); StorageBlobRenamedEventData eventData = (StorageBlobRenamedEventData) toSystemEventData(events[0]); assertEquals("https: } @Test public void consumeStorageDirectoryCreatedEvent() { String requestContent = "[ { \"topic\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Storage/storageAccounts/myaccount\", \"subject\": \"/blobServices/default/containers/testcontainer/blobs/testDir\", \"eventType\": \"Microsoft.Storage.DirectoryCreated\", \"eventTime\": \"2017-08-16T01:57:26.005121Z\", \"id\": \"602a88ef-0001-00e6-1233-1646070610ea\", \"data\": { \"api\": \"CreateDirectory\", \"clientRequestId\": \"799304a4-bbc5-45b6-9849-ec2c66be800a\", \"requestId\": \"602a88ef-0001-00e6-1233-164607000000\", \"eTag\": \"0x8D4E44A24ABE7F1\", \"url\": \"https: List<EventGridEvent> events =
// (continuation of consumeStorageDirectoryCreatedEvent)
EventGridEvent.fromString(requestContent);
assertNotNull(events);
StorageDirectoryCreatedEventData eventData = (StorageDirectoryCreatedEventData) toSystemEventData(events.get(0));
// NOTE(review): mangled source — URL literals and the inline JSON payloads of the directory
// deleted/renamed tests below were truncated at "//" during extraction. Preserved verbatim;
// restore the full literals from VCS history.
assertEquals("https: } @Test public void consumeStorageDirectoryDeletedEvent() { String requestContent = "[{ \"topic\": \"/subscriptions/id/resourceGroups/Storage/providers/Microsoft.Storage/storageAccounts/xstoretestaccount\", \"subject\": \"/blobServices/default/containers/testcontainer/blobs/testDir\", \"eventType\": \"Microsoft.Storage.DirectoryDeleted\", \"eventTime\": \"2017-11-07T20:09:22.5674003Z\", \"id\": \"4c2359fe-001e-00ba-0e04-58586806d298\", \"data\": { \"api\": \"DeleteDirectory\", \"requestId\": \"4c2359fe-001e-00ba-0e04-585868000000\", \"url\": \"https: List<EventGridEvent> events = EventGridEvent.fromString(requestContent); assertNotNull(events); StorageDirectoryDeletedEventData eventData = (StorageDirectoryDeletedEventData) toSystemEventData(events.get(0)); assertEquals("https: } @Test public void consumeStorageDirectoryRenamedEvent() { String requestContent = "[{ \"topic\": \"/subscriptions/id/resourceGroups/Storage/providers/Microsoft.Storage/storageAccounts/xstoretestaccount\", \"subject\": \"/blobServices/default/containers/testcontainer/blobs/testDir\", \"eventType\": \"Microsoft.Storage.DirectoryRenamed\", \"eventTime\": \"2017-11-07T20:09:22.5674003Z\", \"id\": \"4c2359fe-001e-00ba-0e04-58586806d298\", \"data\": { \"api\": \"RenameDirectory\", \"requestId\": \"4c2359fe-001e-00ba-0e04-585868000000\", \"destinationUrl\": \"https: List<EventGridEvent> events = EventGridEvent.fromString(requestContent); assertNotNull(events); StorageDirectoryRenamedEventData eventData = (StorageDirectoryRenamedEventData) toSystemEventData(events.get(0)); assertEquals("https: } @Test public void consumeResourceWriteSuccessEvent() throws IOException { String jsonData = getTestPayloadFromFile("ResourceWriteSuccessEvent.json"); EventGridEvent[] events =
// (continuation of consumeResourceWriteSuccessEvent)
EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
assertNotNull(events);
assertInstanceOf(ResourceWriteSuccessEventData.class, toSystemEventData(events[0]));
ResourceWriteSuccessEventData eventData = (ResourceWriteSuccessEventData) toSystemEventData(events[0]);
assertEquals("72f988bf-86f1-41af-91ab-2d7cd011db47", eventData.getTenantId());
}

// Deserializes an Azure ML "model registered" payload; model tags/properties surface as maps.
@Test
public void consumeMachineLearningServicesModelRegisteredEvent() {
    String requestContent = "[{\"topic\":\"/subscriptions/a5fe3bc5-98f0-4c84-affc-a589f54d9b23/resourceGroups/jenns/providers/Microsoft.MachineLearningServices/workspaces/jenns-canary\",\"eventType\":\"Microsoft.MachineLearningServices.ModelRegistered\",\"subject\":\"models/sklearn_regression_model:3\",\"eventTime\":\"2019-10-17T22:23:57.5350054+00:00\",\"id\":\"3b73ee51-bbf4-480d-9112-cfc23b41bfdb\",\"data\":{\"modelName\":\"sklearn_regression_model\",\"modelVersion\":\"3\",\"modelTags\":{\"area\":\"diabetes\",\"type\":\"regression\"},\"modelProperties\":{\"area\":\"test\"}},\"dataVersion\":\"2\",\"metadataVersion\":\"1\"}]";
    List<EventGridEvent> events = EventGridEvent.fromString(requestContent);
    assertNotNull(events);
    MachineLearningServicesModelRegisteredEventData eventData = (MachineLearningServicesModelRegisteredEventData) toSystemEventData(events.get(0));
    assertEquals("sklearn_regression_model", eventData.getModelName());
    assertEquals("3", eventData.getModelVersion());
    assertInstanceOf(Map.class, eventData.getModelTags());
    assertEquals("regression", ((Map<?, ?>) eventData.getModelTags()).get("type"));
    assertInstanceOf(Map.class, eventData.getModelProperties());
    assertEquals("test", ((Map<?, ?>) eventData.getModelProperties()).get("area"));
}

// Deserializes an Azure ML "model deployed" payload (body continues in the next chunk).
@Test
public void consumeMachineLearningServicesModelDeployedEvent() {
    String requestContent =
// (continuation of consumeMachineLearningServicesModelDeployedEvent — completes the assignment
// begun in the previous chunk; modelIds is a comma-separated list of two model ids)
"[{\"topic\":\"/subscriptions/a5fe3bc5-98f0-4c84-affc-a589f54d9b23/resourceGroups/jenns/providers/Microsoft.MachineLearningServices/workspaces/jenns-canary\",\"eventType\":\"Microsoft.MachineLearningServices.ModelDeployed\",\"subject\":\"endpoints/aciservice1\",\"eventTime\":\"2019-10-23T18:20:08.8824474+00:00\",\"id\":\"40d0b167-be44-477b-9d23-a2befba7cde0\",\"data\":{\"serviceName\":\"aciservice1\",\"serviceComputeType\":\"ACI\",\"serviceTags\":{\"mytag\":\"test tag\"},\"serviceProperties\":{\"myprop\":\"test property\"},\"modelIds\":\"my_first_model:1,my_second_model:1\"},\"dataVersion\":\"2\",\"metadataVersion\":\"1\"}]";
List<EventGridEvent> events = EventGridEvent.fromString(requestContent);
MachineLearningServicesModelDeployedEventData eventData = (MachineLearningServicesModelDeployedEventData) toSystemEventData(events.get(0));
assertNotNull(events);
assertEquals("aciservice1", eventData.getServiceName());
assertEquals(2, eventData.getModelIds().split(",").length);
}

// Deserializes an Azure ML "run completed" payload (assertions continue in the next chunk).
@Test
public void consumeMachineLearningServicesRunCompletedEvent() {
    String requestContent = "[{\"topic\":\"/subscriptions/a5fe3bc5-98f0-4c84-affc-a589f54d9b23/resourceGroups/jenns/providers/Microsoft.MachineLearningServices/workspaces/jenns-canary\",\"eventType\":\"Microsoft.MachineLearningServices.RunCompleted\",\"subject\":\"experiments/0fa9dfaa-cba3-4fa7-b590-23e48548f5c1/runs/AutoML_ad912b2d-6467-4f32-a616-dbe4af6dd8fc\",\"eventTime\":\"2019-10-18T19:29:55.8856038+00:00\",\"id\":\"044ac44d-462c-4043-99eb-d9e01dc760ab\",\"data\":{\"experimentId\":\"0fa9dfaa-cba3-4fa7-b590-23e48548f5c1\",\"experimentName\":\"automl-local-regression\",\"runId\":\"AutoML_ad912b2d-6467-4f32-a616-dbe4af6dd8fc\",\"runType\":\"automl\",\"RunTags\":{\"experiment_status\":\"ModelSelection\",\"experiment_status_descr\":\"Beginning model selection.\"},\"runProperties\":{\"num_iterations\":\"10\",\"target\":\"local\"}},\"dataVersion\":\"2\",\"metadataVersion\":\"1\"}]";
    List<EventGridEvent> events =
// (continuation of consumeMachineLearningServicesRunCompletedEvent)
EventGridEvent.fromString(requestContent);
MachineLearningServicesRunCompletedEventData eventData = (MachineLearningServicesRunCompletedEventData) toSystemEventData(events.get(0));
assertNotNull(events);
assertEquals("AutoML_ad912b2d-6467-4f32-a616-dbe4af6dd8fc", eventData.getRunId());
assertEquals("automl-local-regression", eventData.getExperimentName());
}

// Deserializes an Azure ML "run status changed" payload; checks run id/name/status/type.
@Test
public void consumeMachineLearningServicesRunStatusChangedEvent() {
    String requestContent = "[{\"topic\":\"/subscriptions/a5fe3bc5-98f0-4c84-affc-a589f54d9b23/resourceGroups/jenns/providers/Microsoft.MachineLearningServices/workspaces/jenns-canary\",\"eventType\":\"Microsoft.MachineLearningServices.RunStatusChanged\",\"subject\":\"experiments/0fa9dfaa-cba3-4fa7-b590-23e48548f5c1/runs/AutoML_ad912b2d-6467-4f32-a616-dbe4af6dd8fc\",\"eventTime\":\"2020-03-09T23:53:04.4579724Z\",\"id\":\"aa8cd7df-fe28-5d5d-9b40-3342dbc2a887\",\"data\":{\"runStatus\": \"Running\",\"experimentId\":\"0fa9dfaa-cba3-4fa7-b590-23e48548f5c1\",\"experimentName\":\"automl-local-regression\",\"runId\":\"AutoML_ad912b2d-6467-4f32-a616-dbe4af6dd8fc\",\"runType\":\"automl\",\"runTags\":{\"experiment_status\":\"ModelSelection\",\"experiment_status_descr\":\"Beginning model selection.\"},\"runProperties\":{\"num_iterations\":\"10\",\"target\":\"local\"}},\"dataVersion\":\"2\",\"metadataVersion\":\"1\"}]";
    List<EventGridEvent> events = EventGridEvent.fromString(requestContent);
    MachineLearningServicesRunStatusChangedEventData eventData = (MachineLearningServicesRunStatusChangedEventData) toSystemEventData(events.get(0));
    assertNotNull(events);
    assertEquals("AutoML_ad912b2d-6467-4f32-a616-dbe4af6dd8fc", eventData.getRunId());
    assertEquals("automl-local-regression", eventData.getExperimentName());
    assertEquals("Running", eventData.getRunStatus());
    assertEquals("automl", eventData.getRunType());
}

// Deserializes an Azure ML "dataset drift detected" payload (body continues in the next chunk).
@Test
public void consumeMachineLearningServicesDatasetDriftDetectedEvent() {
    String requestContent =
"[{\"topic\":\"/subscriptions/60582a10-b9fd-49f1-a546-c4194134bba8/resourceGroups/copetersRG/providers/Microsoft.MachineLearningServices/workspaces/driftDemoWS\",\"eventType\":\"Microsoft.MachineLearningServices.DatasetDriftDetected\",\"subject\":\"datadrift/01d29aa4-e6a4-470a-9ef3-66660d21f8ef/run/01d29aa4-e6a4-470a-9ef3-66660d21f8ef_1571590300380\",\"eventTime\":\"2019-10-20T17:08:08.467191+00:00\",\"id\":\"2684de79-b145-4dcf-ad2e-6a1db798585f\",\"data\":{\"dataDriftId\":\"01d29aa4-e6a4-470a-9ef3-66660d21f8ef\",\"dataDriftName\":\"copetersDriftMonitor3\",\"runId\":\"01d29aa4-e6a4-470a-9ef3-66660d21f8ef_1571590300380\",\"baseDatasetId\":\"3c56d136-0f64-4657-a0e8-5162089a88a3\",\"targetDatasetId\":\"d7e74d2e-c972-4266-b5fb-6c9c182d2a74\",\"driftCoefficient\":0.8350349068479208,\"startTime\":\"2019-07-04T00:00:00+00:00\",\"endTime\":\"2019-07-05T00:00:00+00:00\"},\"dataVersion\":\"2\",\"metadataVersion\":\"1\"}]"; /* fixed payload key: was "tarAsSystemEventDatasetId", a botched find/replace of "targetDatasetId" (the key published in the DatasetDriftDetected event schema); the key is not asserted below, so behavior is unchanged */ List<EventGridEvent> events = EventGridEvent.fromString(requestContent); assertNotNull(events); MachineLearningServicesDatasetDriftDetectedEventData eventData = (MachineLearningServicesDatasetDriftDetectedEventData) toSystemEventData(events.get(0)); assertEquals("copetersDriftMonitor3", eventData.getDataDriftName()); } @Test public void consumeCloudEventWebAppUpdatedEvent() { String siteName = "testSite01"; String requestContent = "[{\"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/sites/testSite01\", \"subject\": \"/Microsoft.Web/sites/testSite01\",\"type\": \"Microsoft.Web.AppUpdated\", \"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appEventTypeDetail\": { \"action\": \"Restarted\"},\"name\": \"testSite01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": 
\"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"},\"specversion\": \"1.0\"}]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); WebAppUpdatedEventData eventData = (WebAppUpdatedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(siteName, eventData.getName()); } @Test public void consumeCloudEventWebBackupOperationStartedEvent() { String siteName = "testSite01"; String requestContent = "[{\"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/sites/testSite01\", \"subject\": \"/Microsoft.Web/sites/testSite01\",\"type\": \"Microsoft.Web.BackupOperationStarted\", \"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appEventTypeDetail\": { \"action\": \"Restarted\"},\"name\": \"testSite01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": \"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"},\"specversion\": \"1.0\"}]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); WebBackupOperationStartedEventData eventData = (WebBackupOperationStartedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(siteName, eventData.getName()); } @Test public void consumeCloudEventWebBackupOperationCompletedEvent() { String siteName = "testSite01"; String requestContent = "[{\"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/sites/testSite01\", \"subject\": \"/Microsoft.Web/sites/testSite01\",\"type\": \"Microsoft.Web.BackupOperationCompleted\", 
\"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appEventTypeDetail\": { \"action\": \"Restarted\"},\"name\": \"testSite01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": \"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"},\"specversion\": \"1.0\"}]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); WebBackupOperationCompletedEventData eventData = (WebBackupOperationCompletedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(siteName, eventData.getName()); } @Test public void consumeCloudEventWebBackupOperationFailedEvent() { String siteName = "testSite01"; String requestContent = "[{\"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/sites/testSite01\", \"subject\": \"/Microsoft.Web/sites/testSite01\",\"type\": \"Microsoft.Web.BackupOperationFailed\", \"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appEventTypeDetail\": { \"action\": \"Restarted\"},\"name\": \"testSite01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": \"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"},\"specversion\": \"1.0\"}]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); WebBackupOperationFailedEventData eventData = (WebBackupOperationFailedEventData) toSystemEventData(events.get(0)); assertNotNull(events); 
assertEquals(siteName, eventData.getName()); } @Test public void consumeCloudEventWebRestoreOperationStartedEvent() { String siteName = "testSite01"; String requestContent = "[{\"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/sites/testSite01\", \"subject\": \"/Microsoft.Web/sites/testSite01\",\"type\": \"Microsoft.Web.RestoreOperationStarted\", \"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appEventTypeDetail\": { \"action\": \"Restarted\"},\"name\": \"testSite01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": \"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"},\"specversion\": \"1.0\"}]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); WebRestoreOperationStartedEventData eventData = (WebRestoreOperationStartedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(siteName, eventData.getName()); } @Test public void consumeCloudEventWebRestoreOperationCompletedEvent() { String siteName = "testSite01"; String requestContent = "[{\"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/sites/testSite01\", \"subject\": \"/Microsoft.Web/sites/testSite01\",\"type\": \"Microsoft.Web.RestoreOperationCompleted\", \"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appEventTypeDetail\": { \"action\": \"Restarted\"},\"name\": \"testSite01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": 
\"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"},\"specversion\": \"1.0\"}]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); WebRestoreOperationCompletedEventData eventData = (WebRestoreOperationCompletedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(siteName, eventData.getName()); } @Test public void consumeCloudEventWebRestoreOperationFailedEvent() { String siteName = "testSite01"; String requestContent = "[{\"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/sites/testSite01\", \"subject\": \"/Microsoft.Web/sites/testSite01\",\"type\": \"Microsoft.Web.RestoreOperationFailed\", \"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appEventTypeDetail\": { \"action\": \"Restarted\"},\"name\": \"testSite01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": \"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"},\"specversion\": \"1.0\"}]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); WebRestoreOperationFailedEventData eventData = (WebRestoreOperationFailedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(siteName, eventData.getName()); } @Test public void consumeCloudEventWebSlotSwapStartedEvent() { String siteName = "testSite01"; String requestContent = "[{\"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/sites/testSite01\", \"subject\": \"/Microsoft.Web/sites/testSite01\",\"type\": 
\"Microsoft.Web.SlotSwapStarted\", \"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appEventTypeDetail\": { \"action\": \"Restarted\"},\"name\": \"testSite01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": \"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"},\"specversion\": \"1.0\"}]"; List<CloudEvent> events = CloudEvent.fromString(requestContent, false); WebSlotSwapStartedEventData eventData = (WebSlotSwapStartedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(siteName, eventData.getName()); } @Test public void consumeCloudEventWebSlotSwapCompletedEvent() { String siteName = "testSite01"; String requestContent = "[{\"specversion\": \"1.0\", \"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/sites/testSite01\", \"subject\": \"/Microsoft.Web/sites/testSite01\",\"type\": \"Microsoft.Web.SlotSwapCompleted\", \"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appEventTypeDetail\": { \"action\": \"Restarted\"},\"name\": \"testSite01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": \"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"}}]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); WebSlotSwapCompletedEventData eventData = (WebSlotSwapCompletedEventData) toSystemEventData(events.get(0)); assertNotNull(events); 
assertEquals(siteName, eventData.getName()); } @Test public void consumeCloudEventWebSlotSwapFailedEvent() { String siteName = "testSite01"; String requestContent = "[{\"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/sites/testSite01\", \"subject\": \"/Microsoft.Web/sites/testSite01\",\"type\": \"Microsoft.Web.SlotSwapFailed\", \"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appEventTypeDetail\": { \"action\": \"Restarted\"},\"name\": \"testSite01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": \"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"}, \"specversion\": \"1.0\"}]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); WebSlotSwapFailedEventData eventData = (WebSlotSwapFailedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(siteName, eventData.getName()); } @Test public void consumeCloudEventWebSlotSwapWithPreviewStartedEvent() { String siteName = "testSite01"; String requestContent = "[{\"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/sites/testSite01\", \"subject\": \"/Microsoft.Web/sites/testSite01\",\"type\": \"Microsoft.Web.SlotSwapWithPreviewStarted\", \"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appEventTypeDetail\": { \"action\": \"Restarted\"},\"name\": \"testSite01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": 
\"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"}, \"specversion\": \"1.0\"}]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); WebSlotSwapWithPreviewStartedEventData eventData = (WebSlotSwapWithPreviewStartedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(siteName, eventData.getName()); } @Test public void consumeCloudEventWebSlotSwapWithPreviewCancelledEvent() { String siteName = "testSite01"; String requestContent = "[{\"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/sites/testSite01\", \"subject\": \"/Microsoft.Web/sites/testSite01\",\"type\": \"Microsoft.Web.SlotSwapWithPreviewCancelled\", \"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appEventTypeDetail\": { \"action\": \"Restarted\"},\"name\": \"testSite01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": \"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"}, \"specversion\": \"1.0\"}]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); WebSlotSwapWithPreviewCancelledEventData eventData = (WebSlotSwapWithPreviewCancelledEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(siteName, eventData.getName()); } @Test public void consumeCloudEventWebAppServicePlanUpdatedEvent() { String planName = "testPlan01"; String requestContent = "[{\"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/serverfarms/testPlan01\", \"subject\": 
\"/Microsoft.Web/serverfarms/testPlan01\",\"type\": \"Microsoft.Web.AppServicePlanUpdated\", \"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appServicePlanEventTypeDetail\": { \"stampKind\": \"Public\",\"action\": \"Updated\",\"status\": \"Started\" },\"name\": \"testPlan01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": \"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"},\"specversion\": \"1.0\"}]"; /* removed a duplicated "specversion":"1.0" key from the CloudEvent test payload */ List<CloudEvent> events = CloudEvent.fromString(requestContent); WebAppServicePlanUpdatedEventData eventData = (WebAppServicePlanUpdatedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(planName, eventData.getName()); } @Test public void consumeFhirResourceCreatedEvent() { String requestContent = "[ { \"subject\":\"{fhir-account}.fhir.azurehealthcareapis.com/Patient/e0a1f743-1a70-451f-830e-e96477163902\", \"eventType\":\"Microsoft.HealthcareApis.FhirResourceCreated\", \"eventTime\":\"2017-08-16T03:54:38.2696833Z\", \"id\":\"25b3b0d0-d79b-44d5-9963-440d4e6a9bba\", \"data\": { \"resourceType\": \"Patient\", \"resourceFhirAccount\": \"{fhir-account}.fhir.azurehealthcareapis.com\", \"resourceFhirId\": \"e0a1f743-1a70-451f-830e-e96477163902\", \"resourceVersionId\": 1 }, \"dataVersion\": \"1.0\" }]"; List<EventGridEvent> events = EventGridEvent.fromString(requestContent); assertNotNull(events); HealthcareFhirResourceCreatedEventData eventData = (HealthcareFhirResourceCreatedEventData) toSystemEventData(events.get(0)); assertEquals(HealthcareFhirResourceType.PATIENT, eventData.getFhirResourceType()); assertEquals("{fhir-account}.fhir.azurehealthcareapis.com", eventData.getFhirServiceHostName()); 
assertEquals("e0a1f743-1a70-451f-830e-e96477163902", eventData.getFhirResourceId()); assertEquals(1, eventData.getFhirResourceVersionId()); } @Test public void consumeFhirResourceUpdatedEvent() { String requestContent = "[ { \"subject\":\"{fhir-account}.fhir.azurehealthcareapis.com/Patient/e0a1f743-1a70-451f-830e-e96477163902\", \"eventType\":\"Microsoft.HealthcareApis.FhirResourceUpdated\", \"eventTime\":\"2017-08-16T03:54:38.2696833Z\", \"id\":\"25b3b0d0-d79b-44d5-9963-440d4e6a9bba\", \"data\": { \"resourceType\": \"Patient\", \"resourceFhirAccount\": \"{fhir-account}.fhir.azurehealthcareapis.com\", \"resourceFhirId\": \"e0a1f743-1a70-451f-830e-e96477163902\", \"resourceVersionId\": 1 }, \"dataVersion\": \"1.0\" }]"; List<EventGridEvent> events = EventGridEvent.fromString(requestContent); assertNotNull(events); HealthcareFhirResourceUpdatedEventData eventData = (HealthcareFhirResourceUpdatedEventData) toSystemEventData(events.get(0)); assertEquals(HealthcareFhirResourceType.PATIENT, eventData.getFhirResourceType()); assertEquals("{fhir-account}.fhir.azurehealthcareapis.com", eventData.getFhirServiceHostName()); assertEquals("e0a1f743-1a70-451f-830e-e96477163902", eventData.getFhirResourceId()); assertEquals(1, eventData.getFhirResourceVersionId()); } @Test public void consumeFhirResourceDeletedEvent() { String requestContent = "[ { \"subject\":\"{fhir-account}.fhir.azurehealthcareapis.com/Patient/e0a1f743-1a70-451f-830e-e96477163902\", \"eventType\":\"Microsoft.HealthcareApis.FhirResourceDeleted\", \"eventTime\":\"2017-08-16T03:54:38.2696833Z\", \"id\":\"25b3b0d0-d79b-44d5-9963-440d4e6a9bba\", \"data\": { \"resourceType\": \"Patient\", \"resourceFhirAccount\": \"{fhir-account}.fhir.azurehealthcareapis.com\", \"resourceFhirId\": \"e0a1f743-1a70-451f-830e-e96477163902\", \"resourceVersionId\": 1 }, \"dataVersion\": \"1.0\" }]"; List<EventGridEvent> events = EventGridEvent.fromString(requestContent); assertNotNull(events); HealthcareFhirResourceDeletedEventData 
eventData = (HealthcareFhirResourceDeletedEventData) toSystemEventData(events.get(0)); assertEquals(HealthcareFhirResourceType.PATIENT, eventData.getFhirResourceType()); assertEquals("{fhir-account}.fhir.azurehealthcareapis.com", eventData.getFhirServiceHostName()); assertEquals("e0a1f743-1a70-451f-830e-e96477163902", eventData.getFhirResourceId()); assertEquals(1, eventData.getFhirResourceVersionId()); } @Test public void consumeCloudEventFhirResourceCreatedEvent() { String requestContent = "[ { \"source\": \"/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/Microsoft.HealthcareApis/workspaces/{workspace-name}\", \"subject\":\"{fhir-account}.fhir.azurehealthcareapis.com/Patient/e0a1f743-1a70-451f-830e-e96477163902\", \"type\":\"Microsoft.HealthcareApis.FhirResourceCreated\", \"time\":\"2017-08-16T03:54:38.2696833Z\", \"id\":\"25b3b0d0-d79b-44d5-9963-440d4e6a9bba\", \"data\": { \"resourceType\": \"Patient\", \"resourceFhirAccount\": \"{fhir-account}.fhir.azurehealthcareapis.com\", \"resourceFhirId\": \"e0a1f743-1a70-451f-830e-e96477163902\", \"resourceVersionId\": 1 }, \"specversion\": \"1.0\" }]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); HealthcareFhirResourceCreatedEventData eventData = (HealthcareFhirResourceCreatedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(HealthcareFhirResourceType.PATIENT, eventData.getFhirResourceType()); assertEquals("{fhir-account}.fhir.azurehealthcareapis.com", eventData.getFhirServiceHostName()); assertEquals("e0a1f743-1a70-451f-830e-e96477163902", eventData.getFhirResourceId()); assertEquals(1, eventData.getFhirResourceVersionId()); } @Test public void consumeCloudEventFhirResourceUpdatedEvent() { String requestContent = "[ { \"source\": \"/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/Microsoft.HealthcareApis/workspaces/{workspace-name}\", 
\"subject\":\"{fhir-account}.fhir.azurehealthcareapis.com/Patient/e0a1f743-1a70-451f-830e-e96477163902\", \"type\":\"Microsoft.HealthcareApis.FhirResourceUpdated\", \"time\":\"2017-08-16T03:54:38.2696833Z\", \"id\":\"25b3b0d0-d79b-44d5-9963-440d4e6a9bba\", \"data\": { \"resourceType\": \"Patient\", \"resourceFhirAccount\": \"{fhir-account}.fhir.azurehealthcareapis.com\", \"resourceFhirId\": \"e0a1f743-1a70-451f-830e-e96477163902\", \"resourceVersionId\": 1 }, \"specversion\": \"1.0\" }]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); HealthcareFhirResourceUpdatedEventData eventData = (HealthcareFhirResourceUpdatedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(HealthcareFhirResourceType.PATIENT, eventData.getFhirResourceType()); assertEquals("{fhir-account}.fhir.azurehealthcareapis.com", eventData.getFhirServiceHostName()); assertEquals("e0a1f743-1a70-451f-830e-e96477163902", eventData.getFhirResourceId()); assertEquals(1, eventData.getFhirResourceVersionId()); } @Test public void consumeCloudEventFhirResourceDeletedEvent() { String requestContent = "[ { \"source\": \"/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/Microsoft.HealthcareApis/workspaces/{workspace-name}\", \"subject\":\"{fhir-account}.fhir.azurehealthcareapis.com/Patient/e0a1f743-1a70-451f-830e-e96477163902\", \"type\":\"Microsoft.HealthcareApis.FhirResourceDeleted\", \"time\":\"2017-08-16T03:54:38.2696833Z\", \"id\":\"25b3b0d0-d79b-44d5-9963-440d4e6a9bba\", \"data\": { \"resourceType\": \"Patient\", \"resourceFhirAccount\": \"{fhir-account}.fhir.azurehealthcareapis.com\", \"resourceFhirId\": \"e0a1f743-1a70-451f-830e-e96477163902\", \"resourceVersionId\": 1 }, \"specversion\": \"1.0\" }]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); HealthcareFhirResourceDeletedEventData eventData = (HealthcareFhirResourceDeletedEventData) toSystemEventData(events.get(0)); assertNotNull(events); 
assertEquals(HealthcareFhirResourceType.PATIENT, eventData.getFhirResourceType()); assertEquals("{fhir-account}.fhir.azurehealthcareapis.com", eventData.getFhirServiceHostName()); assertEquals("e0a1f743-1a70-451f-830e-e96477163902", eventData.getFhirResourceId()); assertEquals(1, eventData.getFhirResourceVersionId()); } @Test public void verifyAcsRouterJobClassificationFailedEventDataErrors() { ResponseError error = new ResponseError("InvalidRequest", "The request is invalid"); AcsRouterJobClassificationFailedEventData eventData = new AcsRouterJobClassificationFailedEventData(); eventData.setErrors(Collections.singletonList(error)); List<ResponseError> errors = eventData.getErrors(); assertEquals(1, errors.size()); assertEquals("InvalidRequest", errors.get(0).getCode()); assertEquals("The request is invalid", errors.get(0).getMessage()); } private String getTestPayloadFromFile(String fileName) throws IOException { ClassLoader classLoader = getClass().getClassLoader(); try (InputStream inputStream = classLoader.getResourceAsStream("customization/" + fileName)) { byte[] bytes = new byte[inputStream.available()]; inputStream.read(bytes); return new String(bytes); } } }
// Deserialization helpers plus round-trip / consumption tests for EventGridEvent and CloudEvent.
class DeserializationTests {

    // Maps an EventGridEvent to its strongly-typed system-event model via the
    // SystemEventNames registry; returns null when the event type is unknown.
    // (Dropped the unused <T> type parameter the original declared.)
    static Object toSystemEventData(EventGridEvent event) {
        return getSystemEventData(event.getData(), event.getEventType());
    }

    // CloudEvent overload of the helper above.
    static Object toSystemEventData(CloudEvent event) {
        return getSystemEventData(event.getData(), event.getType());
    }

    // Looks up the model class registered for eventType and deserializes data into it;
    // returns null for event types with no registered mapping.
    static Object getSystemEventData(BinaryData data, String eventType) {
        if (SystemEventNames.getSystemEventMappings().containsKey(eventType)) {
            return data
                .toObject(TypeReference.createInstance(SystemEventNames.getSystemEventMappings().get(eventType)));
        }
        return null;
    }

    @ParameterizedTest
    @MethodSource("getObjectsForRoundTrip")
    public void testEventGridRoundTripStreamSerialization(BinaryData payload) {
        EventGridEvent eventGridEvent = new EventGridEvent("subject", "eventType", payload, "dataVersion");
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        try {
            // try-with-resources: the original never closed the JsonWriter (resource leak);
            // closing before reading also guarantees all output reached the stream.
            try (JsonWriter writer = JsonProviders.createWriter(stream)) {
                eventGridEvent.toJson(writer);
                writer.flush();
            }
            try (JsonReader reader = JsonProviders.createReader(stream.toByteArray())) {
                EventGridEvent deserializedEvent = EventGridEvent.fromJson(reader);
                assertEquals(eventGridEvent.getSubject(), deserializedEvent.getSubject());
                assertEquals(eventGridEvent.getEventType(), deserializedEvent.getEventType());
                assertArrayEquals(eventGridEvent.getData().toBytes(), deserializedEvent.getData().toBytes());
                assertEquals(eventGridEvent.getDataVersion(), deserializedEvent.getDataVersion());
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    // Payload variants for the round-trip test: number, string, raw JSON, boolean.
    private static Stream<Arguments> getObjectsForRoundTrip() {
        return Stream.of(
            Arguments.of(BinaryData.fromObject(1)),
            Arguments.of(BinaryData.fromObject("data")),
            Arguments.of(BinaryData.fromString("{\"data\":\"data\"}")),
            Arguments.of(BinaryData.fromObject(true))
        );
    }

    // Was annotated "@Test @Test": JUnit 5's @Test is not @Repeatable, so the
    // duplicated annotation is a compile error. Reduced to a single @Test.
    @Test
    public void consumeEventGridEventWithoutArrayBrackets() throws IOException {
        String jsonData = getTestPayloadFromFile("EventGridEventNoArray.json");
        EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new 
EventGridEvent[0]); assertNotNull(events); assertEquals(1, events.length); StorageBlobDeletedEventData eventData = assertInstanceOf(StorageBlobDeletedEventData.class, toSystemEventData(events[0])); assertEquals("https: } @Test public void consumeEventGridEventWithNullData() throws IOException { String jsonData = getTestPayloadFromFile("EventGridNullData.json"); assertThrows(IllegalArgumentException.class, () -> { EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); }); } @Test public void consumeCustomEvents() throws IOException { String jsonData = getTestPayloadFromFile("CustomEvents.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertEquals(1, events.length); assertNotNull(events[0].getData().toObject(TypeReference.createInstance(ContosoItemReceivedEventData.class))); ContosoItemReceivedEventData eventData = events[0].getData().toObject(TypeReference.createInstance(ContosoItemReceivedEventData.class)); assertEquals("512d38b6-c7b8-40c8-89fe-f46f9e9622b6", eventData.getItemSku()); } @Test public void consumeCustomEventWithArrayData() throws IOException { String jsonData = getTestPayloadFromFile("CustomEventWithArrayData.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertEquals(1, events.length); List<ContosoItemReceivedEventData> eventData = events[0].getData().toObject(new TypeReference<List<ContosoItemReceivedEventData>>() {}); assertNotNull(eventData); assertEquals("512d38b6-c7b8-40c8-89fe-f46f9e9622b6", (eventData.get(0)).getItemSku()); } @Test public void consumeCustomEventWithBooleanData() throws IOException { String jsonData = getTestPayloadFromFile("CustomEventWithBooleanData.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertEquals(1, events.length); Boolean eventData = 
events[0].getData().toObject(TypeReference.createInstance(Boolean.class)); assertNotNull(eventData); assertTrue(eventData); } @Test public void consumeCustomEventWithStringData() throws IOException { String jsonData = getTestPayloadFromFile("CustomEventWithStringData.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertEquals(1, events.length); String eventData = events[0].getData().toObject(String.class); assertNotNull(eventData); assertEquals("stringdata", eventData); } @Test public void consumeCustomEventWithPolymorphicData() throws IOException { String jsonData = getTestPayloadFromFile("CustomEventWithPolymorphicData.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertEquals(2, events.length); ContosoItemSentEventData eventData0 = events[0].getData().toObject(ContosoItemSentEventData.class); ContosoItemSentEventData eventData1 = events[1].getData().toObject(ContosoItemSentEventData.class); assertNotNull(eventData0); assertNotNull(eventData1); assertInstanceOf(DroneShippingInfo.class, eventData0.getShippingInfo()); assertInstanceOf(RocketShippingInfo.class, eventData1.getShippingInfo()); } @Test public void consumeMultipleEventsInSameBatch() throws IOException { String jsonData = getTestPayloadFromFile("MultipleEventsInSameBatch.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); assertEquals(4, events.length); assertInstanceOf(StorageBlobCreatedEventData.class, toSystemEventData(events[0])); assertInstanceOf(StorageBlobDeletedEventData.class, toSystemEventData(events[1])); StorageBlobDeletedEventData eventData = assertInstanceOf(StorageBlobDeletedEventData.class, toSystemEventData(events[2])); assertInstanceOf(ServiceBusDeadletterMessagesAvailableWithNoListenersEventData.class, toSystemEventData(events[3])); assertEquals("https: } 
// NOTE(review): recovered from a line-mangled extraction. Three MediaLiveEvent tests below
// contain expected-value literals truncated to `assertEquals("rtmp:` — those statement runs
// are preserved verbatim on a single line (the unterminated string lexes to end-of-line)
// and must be restored from source control. Two inline JSON payload strings that the
// extraction split mid-literal were rejoined onto one line. All other code is
// token-identical to the original; only comments and formatting were added.

// --- App Configuration system events: fixture deserializes and the key round-trips. ---
@Test
public void consumeAppConfigurationKeyValueDeletedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("AppConfigurationKeyValueDeleted.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    AppConfigurationKeyValueDeletedEventData eventData = assertInstanceOf(AppConfigurationKeyValueDeletedEventData.class, toSystemEventData(events[0]));
    assertEquals("key1", eventData.getKey());
}

@Test
public void consumeAppConfigurationKeyValueModifiedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("AppConfigurationKeyValueModified.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    AppConfigurationKeyValueModifiedEventData eventData = assertInstanceOf(AppConfigurationKeyValueModifiedEventData.class, toSystemEventData(events[0]));
    assertEquals("key1", eventData.getKey());
}

// --- Container Registry system events. ---
@Test
public void consumeContainerRegistryImagePushedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("ContainerRegistryImagePushedEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    ContainerRegistryImagePushedEventData eventData = assertInstanceOf(ContainerRegistryImagePushedEventData.class, toSystemEventData(events[0]));
    assertEquals("127.0.0.1", eventData.getRequest().getAddr());
}

@Test
public void consumeContainerRegistryImageDeletedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("ContainerRegistryImageDeletedEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    ContainerRegistryImageDeletedEventData eventData = assertInstanceOf(ContainerRegistryImageDeletedEventData.class, toSystemEventData(events[0]));
    assertEquals("testactor", eventData.getActor().getName());
}

@Test
public void consumeContainerRegistryChartDeletedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("ContainerRegistryChartDeletedEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    ContainerRegistryChartDeletedEventData eventData = assertInstanceOf(ContainerRegistryChartDeletedEventData.class, toSystemEventData(events[0]));
    assertEquals("mediatype1", eventData.getTarget().getMediaType());
}

@Test
public void consumeContainerRegistryChartPushedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("ContainerRegistryChartPushedEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    ContainerRegistryChartPushedEventData eventData = assertInstanceOf(ContainerRegistryChartPushedEventData.class, toSystemEventData(events[0]));
    assertEquals("mediatype1", eventData.getTarget().getMediaType());
}

// --- IoT Hub device lifecycle / telemetry system events. ---
@Test
public void consumeIoTHubDeviceCreatedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("IoTHubDeviceCreatedEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    IotHubDeviceCreatedEventData eventData = assertInstanceOf(IotHubDeviceCreatedEventData.class, toSystemEventData(events[0]));
    assertEquals("enabled", eventData.getTwin().getStatus());
}

@Test
public void consumeIoTHubDeviceDeletedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("IoTHubDeviceDeletedEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    IotHubDeviceDeletedEventData eventData = assertInstanceOf(IotHubDeviceDeletedEventData.class, toSystemEventData(events[0]));
    assertEquals("AAAAAAAAAAE=", eventData.getTwin().getEtag());
}

@Test
public void consumeIoTHubDeviceConnectedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("IoTHubDeviceConnectedEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    IotHubDeviceConnectedEventData eventData = assertInstanceOf(IotHubDeviceConnectedEventData.class, toSystemEventData(events[0]));
    assertEquals("EGTESTHUB1", eventData.getHubName());
}

@Test
public void consumeIoTHubDeviceDisconnectedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("IoTHubDeviceDisconnectedEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    IotHubDeviceDisconnectedEventData eventData = assertInstanceOf(IotHubDeviceDisconnectedEventData.class, toSystemEventData(events[0]));
    assertEquals("000000000000000001D4132452F67CE200000002000000000000000000000002", eventData.getDeviceConnectionStateEventInfo().getSequenceNumber());
}

@Test
public void consumeIoTHubDeviceTelemetryEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("IoTHubDeviceTelemetryEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    IotHubDeviceTelemetryEventData eventData = assertInstanceOf(IotHubDeviceTelemetryEventData.class, toSystemEventData(events[0]));
    assertEquals("Active", eventData.getProperties().get("Status"));
}

// --- Event Grid meta-events (subscription validation / deletion). ---
@Test
public void consumeEventGridSubscriptionValidationEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("EventGridSubscriptionValidationEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    SubscriptionValidationEventData eventData = assertInstanceOf(SubscriptionValidationEventData.class, toSystemEventData(events[0]));
    assertEquals("512d38b6-c7b8-40c8-89fe-f46f9e9622b6", eventData.getValidationCode());
}

@Test
public void consumeEventGridSubscriptionDeletedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("EventGridSubscriptionDeletedEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    SubscriptionDeletedEventData eventData = assertInstanceOf(SubscriptionDeletedEventData.class, toSystemEventData(events[0]));
    assertEquals("/subscriptions/id/resourceGroups/rg/providers/Microsoft.EventGrid/topics/topic1/providers/Microsoft.EventGrid/eventSubscriptions/eventsubscription1", eventData.getEventSubscriptionId());
}

@Test
public void consumeEventHubCaptureFileCreatedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("EventHubCaptureFileCreatedEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    EventHubCaptureFileCreatedEventData eventData = assertInstanceOf(EventHubCaptureFileCreatedEventData.class, toSystemEventData(events[0]));
    assertEquals("AzureBlockBlob", eventData.getFileType());
}

// --- Azure Maps geofence system events. ---
@Test
public void consumeMapsGeoFenceEnteredEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MapsGeofenceEnteredEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MapsGeofenceEnteredEventData eventData = assertInstanceOf(MapsGeofenceEnteredEventData.class, toSystemEventData(events[0]));
    assertEquals(true, eventData.isEventPublished());
}

@Test
public void consumeMapsGeoFenceExitedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MapsGeofenceExitedEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MapsGeofenceExitedEventData eventData = assertInstanceOf(MapsGeofenceExitedEventData.class, toSystemEventData(events[0]));
    assertEquals(true, eventData.isEventPublished());
}

@Test
public void consumeMapsGeoFenceResultEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MapsGeofenceResultEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MapsGeofenceResultEventData eventData = assertInstanceOf(MapsGeofenceResultEventData.class, toSystemEventData(events[0]));
    assertEquals(true, eventData.isEventPublished());
}

// --- Media Services job state-transition events: verify previous/new state pairs. ---
@Test
public void consumeMediaJobCanceledEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaJobCanceledEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MediaJobCanceledEventData eventData = assertInstanceOf(MediaJobCanceledEventData.class, toSystemEventData(events[0]));
    assertEquals(MediaJobState.CANCELING, eventData.getPreviousState());
    assertEquals(MediaJobState.CANCELED, eventData.getState());
    assertEquals(1, eventData.getOutputs().size());
    MediaJobOutputAsset outputAsset = assertInstanceOf(MediaJobOutputAsset.class, eventData.getOutputs().get(0));
    assertEquals(MediaJobState.CANCELED, outputAsset.getState());
    assertNull(outputAsset.getError());
    // A canceled job must not report completed progress.
    assertNotEquals(100, outputAsset.getProgress());
    assertEquals("output-7a8215f9-0f8d-48a6-82ed-1ead772bc221", outputAsset.getAssetName());
}

@Test
public void consumeMediaJobCancelingEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaJobCancelingEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MediaJobCancelingEventData eventData = assertInstanceOf(MediaJobCancelingEventData.class, toSystemEventData(events[0]));
    assertEquals(MediaJobState.PROCESSING, eventData.getPreviousState());
    assertEquals(MediaJobState.CANCELING, eventData.getState());
}

@Test
public void consumeMediaJobProcessingEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaJobProcessingEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MediaJobProcessingEventData eventData = assertInstanceOf(MediaJobProcessingEventData.class, toSystemEventData(events[0]));
    assertEquals(MediaJobState.SCHEDULED, eventData.getPreviousState());
    assertEquals(MediaJobState.PROCESSING, eventData.getState());
}

@Test
public void consumeMediaJobFinishedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaJobFinishedEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MediaJobFinishedEventData eventData = assertInstanceOf(MediaJobFinishedEventData.class, toSystemEventData(events[0]));
    assertEquals(MediaJobState.PROCESSING, eventData.getPreviousState());
    assertEquals(MediaJobState.FINISHED, eventData.getState());
    assertEquals(1, eventData.getOutputs().size());
    MediaJobOutputAsset outputAsset = assertInstanceOf(MediaJobOutputAsset.class, eventData.getOutputs().get(0));
    assertEquals(MediaJobState.FINISHED, outputAsset.getState());
    assertNull(outputAsset.getError());
    assertEquals(100, outputAsset.getProgress());
    assertEquals("output-298338bb-f8d1-4d0f-9fde-544e0ac4d983", outputAsset.getAssetName());
}

@Test
public void consumeMediaJobErroredEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaJobErroredEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MediaJobErroredEventData eventData = assertInstanceOf(MediaJobErroredEventData.class, toSystemEventData(events[0]));
    assertEquals(MediaJobState.PROCESSING, eventData.getPreviousState());
    assertEquals(MediaJobState.ERROR, eventData.getState());
    assertEquals(1, eventData.getOutputs().size());
    assertInstanceOf(MediaJobOutputAsset.class, eventData.getOutputs().get(0));
    assertEquals(MediaJobState.ERROR, eventData.getOutputs().get(0).getState());
    assertNotNull(eventData.getOutputs().get(0).getError());
    assertEquals(MediaJobErrorCategory.SERVICE, eventData.getOutputs().get(0).getError().getCategory());
    assertEquals(MediaJobErrorCode.SERVICE_ERROR, eventData.getOutputs().get(0).getError().getCode());
}

@Test
public void consumeMediaJobOutputStateChangeEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaJobOutputStateChangeEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MediaJobOutputStateChangeEventData eventData = assertInstanceOf(MediaJobOutputStateChangeEventData.class, toSystemEventData(events[0]));
    assertEquals(MediaJobState.SCHEDULED, eventData.getPreviousState());
    assertEquals(MediaJobState.PROCESSING, eventData.getOutput().getState());
    MediaJobOutputAsset outputAsset = assertInstanceOf(MediaJobOutputAsset.class, eventData.getOutput());
    assertEquals("output-2ac2fe75-6557-4de5-ab25-5713b74a6901", outputAsset.getAssetName());
}

@Test
public void consumeMediaJobScheduledEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaJobScheduledEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MediaJobScheduledEventData eventData = assertInstanceOf(MediaJobScheduledEventData.class, toSystemEventData(events[0]));
    assertEquals(MediaJobState.QUEUED, eventData.getPreviousState());
    assertEquals(MediaJobState.SCHEDULED, eventData.getState());
}

@Test
public void consumeMediaJobOutputCanceledEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaJobOutputCanceledEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MediaJobOutputCanceledEventData eventData = assertInstanceOf(MediaJobOutputCanceledEventData.class, toSystemEventData(events[0]));
    assertEquals(MediaJobState.CANCELING, eventData.getPreviousState());
    assertEquals(MediaJobState.CANCELED, eventData.getOutput().getState());
    assertInstanceOf(MediaJobOutputAsset.class, eventData.getOutput());
}

@Test
public void consumeMediaJobOutputCancelingEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaJobOutputCancelingEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MediaJobOutputCancelingEventData eventData = assertInstanceOf(MediaJobOutputCancelingEventData.class, toSystemEventData(events[0]));
    assertEquals(MediaJobState.PROCESSING, eventData.getPreviousState());
    assertEquals(MediaJobState.CANCELING, eventData.getOutput().getState());
    assertInstanceOf(MediaJobOutputAsset.class, eventData.getOutput());
}

@Test
public void consumeMediaJobOutputErroredEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaJobOutputErroredEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MediaJobOutputErroredEventData eventData = assertInstanceOf(MediaJobOutputErroredEventData.class, toSystemEventData(events[0]));
    assertEquals(MediaJobState.PROCESSING, eventData.getPreviousState());
    assertEquals(MediaJobState.ERROR, eventData.getOutput().getState());
    assertInstanceOf(MediaJobOutputAsset.class, eventData.getOutput());
    assertNotNull(eventData.getOutput().getError());
    assertEquals(MediaJobErrorCategory.SERVICE, eventData.getOutput().getError().getCategory());
    assertEquals(MediaJobErrorCode.SERVICE_ERROR, eventData.getOutput().getError().getCode());
}

@Test
public void consumeMediaJobOutputFinishedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaJobOutputFinishedEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MediaJobOutputFinishedEventData eventData = assertInstanceOf(MediaJobOutputFinishedEventData.class, toSystemEventData(events[0]));
    assertEquals(MediaJobState.PROCESSING, eventData.getPreviousState());
    assertEquals(MediaJobState.FINISHED, eventData.getOutput().getState());
    assertInstanceOf(MediaJobOutputAsset.class, eventData.getOutput());
    assertEquals(100, eventData.getOutput().getProgress());
    MediaJobOutputAsset outputAsset = (MediaJobOutputAsset) eventData.getOutput();
    assertEquals("output-2ac2fe75-6557-4de5-ab25-5713b74a6901", outputAsset.getAssetName());
}

@Test
public void consumeMediaJobOutputProcessingEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaJobOutputProcessingEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MediaJobOutputProcessingEventData eventData = assertInstanceOf(MediaJobOutputProcessingEventData.class, toSystemEventData(events[0]));
    assertEquals(MediaJobState.SCHEDULED, eventData.getPreviousState());
    assertEquals(MediaJobState.PROCESSING, eventData.getOutput().getState());
    assertInstanceOf(MediaJobOutputAsset.class, eventData.getOutput());
}

@Test
public void consumeMediaJobOutputScheduledEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaJobOutputScheduledEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MediaJobOutputScheduledEventData eventData = assertInstanceOf(MediaJobOutputScheduledEventData.class, toSystemEventData(events[0]));
    assertEquals(MediaJobState.QUEUED, eventData.getPreviousState());
    assertEquals(MediaJobState.SCHEDULED, eventData.getOutput().getState());
    assertInstanceOf(MediaJobOutputAsset.class, eventData.getOutput());
}

// Progress events carry a free-form label plus caller-supplied correlation data.
@Test
public void consumeMediaJobOutputProgressEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaJobOutputProgressEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MediaJobOutputProgressEventData eventData = assertInstanceOf(MediaJobOutputProgressEventData.class, toSystemEventData(events[0]));
    assertEquals("TestLabel", eventData.getLabel());
    assertTrue(eventData.getJobCorrelationData().containsKey("Field1"));
    assertEquals("test1", eventData.getJobCorrelationData().get("Field1"));
    assertTrue(eventData.getJobCorrelationData().containsKey("Field2"));
    assertEquals("test2", eventData.getJobCorrelationData().get("Field2"));
}

@Test
public void consumeMediaJobStateChangeEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaJobStateChangeEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MediaJobStateChangeEventData eventData = assertInstanceOf(MediaJobStateChangeEventData.class, toSystemEventData(events[0]));
    assertEquals(MediaJobState.SCHEDULED, eventData.getPreviousState());
    assertEquals(MediaJobState.PROCESSING, eventData.getState());
}

// --- Media Live Event system events. ---
@Test
public void consumeMediaLiveEventEncoderConnectedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaLiveEventEncoderConnectedEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MediaLiveEventEncoderConnectedEventData eventData = assertInstanceOf(MediaLiveEventEncoderConnectedEventData.class, toSystemEventData(events[0]));
    // TODO(review): the expected ingest-URL literal was truncated during extraction; the
    // unterminated "rtmp: string corrupts the remainder of this line. Restore the full
    // "rtmp://..." argument and closing parenthesis from source control. Preserved verbatim:
    assertEquals("rtmp: assertEquals("Mystream1", eventData.getStreamId()); assertEquals("<ip address>", eventData.getEncoderIp()); assertEquals("3557", eventData.getEncoderPort()); }

@Test
public void consumeMediaLiveEventConnectionRejectedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaLiveEventConnectionRejectedEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MediaLiveEventConnectionRejectedEventData eventData = assertInstanceOf(MediaLiveEventConnectionRejectedEventData.class, toSystemEventData(events[0]));
    assertEquals("Mystream1", eventData.getStreamId());
}

@Test
public void consumeMediaLiveEventEncoderDisconnectedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaLiveEventEncoderDisconnectedEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MediaLiveEventEncoderDisconnectedEventData eventData = assertInstanceOf(MediaLiveEventEncoderDisconnectedEventData.class, toSystemEventData(events[0]));
    // TODO(review): truncated "rtmp: literal — same extraction damage as above; restore the
    // full expected-URL argument from source control. Preserved verbatim:
    assertEquals("rtmp: assertEquals("Mystream1", eventData.getStreamId()); assertEquals("<ip address>", eventData.getEncoderIp()); assertEquals("3557", eventData.getEncoderPort()); }

@Test
public void consumeMediaLiveEventIncomingStreamReceivedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaLiveEventIncomingStreamReceivedEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MediaLiveEventIncomingStreamReceivedEventData eventData = assertInstanceOf(MediaLiveEventIncomingStreamReceivedEventData.class, toSystemEventData(events[0]));
    // TODO(review): truncated "rtmp: literal — same extraction damage as above; restore the
    // full expected-URL argument from source control. Preserved verbatim:
    assertEquals("rtmp: assertEquals("<ip address>", eventData.getEncoderIp()); assertEquals("3557", eventData.getEncoderPort()); assertEquals("audio", eventData.getTrackType()); assertEquals("audio_160000", eventData.getTrackName()); assertEquals("66", eventData.getTimestamp()); assertEquals("1950", eventData.getDuration()); assertEquals("1000", eventData.getTimescale()); }

@Test
public void consumeMediaLiveEventIncomingStreamsOutOfSyncEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaLiveEventIncomingStreamsOutOfSyncEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MediaLiveEventIncomingStreamsOutOfSyncEventData eventData = assertInstanceOf(MediaLiveEventIncomingStreamsOutOfSyncEventData.class, toSystemEventData(events[0]));
    assertEquals("10999", eventData.getMinLastTimestamp());
    assertEquals("video", eventData.getTypeOfStreamWithMinLastTimestamp());
    assertEquals("100999", eventData.getMaxLastTimestamp());
    assertEquals("audio", eventData.getTypeOfStreamWithMaxLastTimestamp());
    assertEquals("1000", eventData.getTimescaleOfMinLastTimestamp());
    assertEquals("1000", eventData.getTimescaleOfMaxLastTimestamp());
}

@Test
public void consumeMediaLiveEventIncomingVideoStreamsOutOfSyncEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaLiveEventIncomingVideoStreamsOutOfSyncEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MediaLiveEventIncomingVideoStreamsOutOfSyncEventData eventData = assertInstanceOf(MediaLiveEventIncomingVideoStreamsOutOfSyncEventData.class, toSystemEventData(events[0]));
    assertEquals("10999", eventData.getFirstTimestamp());
    assertEquals("2000", eventData.getFirstDuration());
    assertEquals("100999", eventData.getSecondTimestamp());
    assertEquals("2000", eventData.getSecondDuration());
    assertEquals("1000", eventData.getTimescale());
}

@Test
public void consumeMediaLiveEventIncomingDataChunkDroppedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaLiveEventIncomingDataChunkDroppedEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MediaLiveEventIncomingDataChunkDroppedEventData eventData = assertInstanceOf(MediaLiveEventIncomingDataChunkDroppedEventData.class, toSystemEventData(events[0]));
    assertEquals("8999", eventData.getTimestamp());
    assertEquals("video", eventData.getTrackType());
    assertEquals("video1", eventData.getTrackName());
    assertEquals("1000", eventData.getTimescale());
    assertEquals("FragmentDrop_OverlapTimestamp", eventData.getResultCode());
}

// Heartbeat ingest drift: numeric "ingestDriftValue" maps to an int; the literal "n/a"
// sentinel in the second (inline) payload maps to null.
@Test
public void consumeMediaLiveEventIngestHeartbeatEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaLiveEventIngestHeartbeatEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MediaLiveEventIngestHeartbeatEventData eventData = assertInstanceOf(MediaLiveEventIngestHeartbeatEventData.class, toSystemEventData(events[0]));
    assertEquals("video", eventData.getTrackType());
    assertEquals("video", eventData.getTrackName());
    assertEquals("11999", eventData.getLastTimestamp());
    assertEquals("1000", eventData.getTimescale());
    assertTrue(eventData.isUnexpectedBitrate());
    assertEquals("Running", eventData.getState());
    assertFalse(eventData.isHealthy());
    assertEquals(0, eventData.getIngestDriftValue());
    assertEquals(OffsetDateTime.parse("2021-05-14T23:50:00.00Z"), eventData.getLastFragmentArrivalTime());
    jsonData = "[{ \"topic\": \"/subscriptions/{subscription id}/resourceGroups/{resource group}/providers/Microsoft.Media/mediaservices/{account name}\", \"subject\": \"liveEvent/liveevent-ec9d26a8\", \"eventType\": \"Microsoft.Media.LiveEventIngestHeartbeat\", \"eventTime\": \"2018-10-12T15:52:37.3710102\", \"id\": \"d84727e2-d9c0-4a21-a66b-8d23f06b3e06\", \"data\": { \"trackType\": \"video\", \"trackName\": \"video\", \"bitrate\": 2500000, \"incomingBitrate\": 500726, \"lastTimestamp\": \"11999\", \"timescale\": \"1000\", \"overlapCount\": 0, \"discontinuityCount\": 0, \"nonincreasingCount\": 0, \"unexpectedBitrate\": true, \"state\": \"Running\", \"healthy\": false, \"lastFragmentArrivalTime\": \"2021-05-14T23:50:00.00\", \"ingestDriftValue\": \"n/a\" }, \"dataVersion\": \"1.0\", \"metadataVersion\": \"1\"}]";
    events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(MediaLiveEventIngestHeartbeatEventData.class, toSystemEventData(events[0]));
    eventData = (MediaLiveEventIngestHeartbeatEventData) toSystemEventData(events[0]);
    assertNull(eventData.getIngestDriftValue());
}

@Test
public void consumeMediaLiveEventTrackDiscontinuityDetectedEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("MediaLiveEventTrackDiscontinuityDetectedEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MediaLiveEventTrackDiscontinuityDetectedEventData eventData = assertInstanceOf(MediaLiveEventTrackDiscontinuityDetectedEventData.class, toSystemEventData(events[0]));
    assertEquals("video", eventData.getTrackType());
    assertEquals("video", eventData.getTrackName());
    assertEquals("10999", eventData.getPreviousTimestamp());
    assertEquals("14999", eventData.getNewTimestamp());
    assertEquals("1000", eventData.getTimescale());
    assertEquals("4000", eventData.getDiscontinuityGap());
}

// Channel latency: numeric "channelLatencyMs" maps to a Duration; the "n/a" sentinel maps
// to null. (NOTE(review): the second payload literal was split mid-string by the
// extraction; it is rejoined onto one line here.)
@Test
public void consumeMediaLiveEventChannelArchiveHeartbeatEvent() throws IOException {
    String jsonData = "[{ \"topic\": \"/subscriptions/{subscription id}/resourceGroups/{resource group}/providers/Microsoft.Media/mediaservices/{account name}\", \"subject\": \"liveEvent/mle1\", \"eventType\": \"Microsoft.Media.LiveEventChannelArchiveHeartbeat\", \"eventTime\": \"2021-05-14T23:50:00.324\", \"id\": \"7f450938-491f-41e1-b06f-c6cd3965d786\", \"data\": { \"channelLatencyMs\": \"10\", \"latencyResultCode\": \"S_OK\"}, \"dataVersion\": \"1.0\", \"metadataVersion\": \"1\"}]";
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    MediaLiveEventChannelArchiveHeartbeatEventData eventData = assertInstanceOf(MediaLiveEventChannelArchiveHeartbeatEventData.class, toSystemEventData(events[0]));
    assertEquals(Duration.ofMillis(10), eventData.getChannelLatency());
    assertEquals("S_OK", eventData.getLatencyResultCode());
    jsonData = "[{ \"topic\": \"/subscriptions/{subscription id}/resourceGroups/{resource group}/providers/Microsoft.Media/mediaservices/{account name}\", \"subject\": \"liveEvent/mle1\", \"eventType\": \"Microsoft.Media.LiveEventChannelArchiveHeartbeat\", \"eventTime\": \"2021-05-14T23:50:00.324\", \"id\": \"7f450938-491f-41e1-b06f-c6cd3965d786\", \"data\": { \"channelLatencyMs\": \"n/a\", \"latencyResultCode\": \"S_OK\"}, \"dataVersion\": \"1.0\", \"metadataVersion\": \"1\"}]";
    events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    assertInstanceOf(MediaLiveEventChannelArchiveHeartbeatEventData.class, toSystemEventData(events[0]));
    eventData = (MediaLiveEventChannelArchiveHeartbeatEventData) toSystemEventData(events[0]);
    assertNull(eventData.getChannelLatency());
    assertEquals("S_OK", eventData.getLatencyResultCode());
}

// --- ARM resource-operation system events: the tenant id round-trips. ---
@Test
public void consumeResourceWriteFailureEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("ResourceWriteFailureEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    ResourceWriteFailureEventData eventData = assertInstanceOf(ResourceWriteFailureEventData.class, toSystemEventData(events[0]));
    assertEquals("72f988bf-86f1-41af-91ab-2d7cd011db47", eventData.getTenantId());
}

@Test
public void consumeResourceWriteCancelEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("ResourceWriteCancelEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    ResourceWriteCancelEventData eventData = assertInstanceOf(ResourceWriteCancelEventData.class, toSystemEventData(events[0]));
    assertEquals("72f988bf-86f1-41af-91ab-2d7cd011db47", eventData.getTenantId());
}

@Test
public void consumeResourceDeleteSuccessEvent() throws IOException {
    String jsonData = getTestPayloadFromFile("ResourceDeleteSuccessEvent.json");
    EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]);
    assertNotNull(events);
    ResourceDeleteSuccessEventData eventData = assertInstanceOf(ResourceDeleteSuccessEventData.class, toSystemEventData(events[0]));
    assertEquals("72f988bf-86f1-41af-91ab-2d7cd011db47", eventData.getTenantId());
}
// Resource delete/action lifecycle events: each loads a JSON fixture and verifies the tenant-id deserializes.
@Test public void consumeResourceDeleteFailureEvent() throws IOException { String jsonData = getTestPayloadFromFile("ResourceDeleteFailureEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); ResourceDeleteFailureEventData eventData = assertInstanceOf(ResourceDeleteFailureEventData.class, toSystemEventData(events[0])); assertEquals("72f988bf-86f1-41af-91ab-2d7cd011db47", eventData.getTenantId()); }
@Test public void consumeResourceDeleteCancelEvent() throws IOException { String jsonData = getTestPayloadFromFile("ResourceDeleteCancelEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); ResourceDeleteCancelEventData eventData = assertInstanceOf(ResourceDeleteCancelEventData.class, toSystemEventData(events[0])); assertEquals("72f988bf-86f1-41af-91ab-2d7cd011db47", eventData.getTenantId()); }
@Test public void consumeResourceActionSuccessEvent() throws IOException { String jsonData = getTestPayloadFromFile("ResourceActionSuccessEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); ResourceActionSuccessEventData eventData = assertInstanceOf(ResourceActionSuccessEventData.class, toSystemEventData(events[0])); assertEquals("72f988bf-86f1-41af-91ab-2d7cd011db47", eventData.getTenantId()); }
@Test public void consumeResourceActionFailureEvent() throws IOException { String jsonData = getTestPayloadFromFile("ResourceActionFailureEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); ResourceActionFailureEventData eventData = assertInstanceOf(ResourceActionFailureEventData.class, toSystemEventData(events[0])); assertEquals("72f988bf-86f1-41af-91ab-2d7cd011db47", eventData.getTenantId()); }
@Test public void consumeResourceActionCancelEvent() throws IOException { String jsonData = getTestPayloadFromFile("ResourceActionCancelEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); ResourceActionCancelEventData eventData = assertInstanceOf(ResourceActionCancelEventData.class, toSystemEventData(events[0])); assertEquals("72f988bf-86f1-41af-91ab-2d7cd011db47", eventData.getTenantId()); }
// Service Bus "messages available with no listeners" events: verify the namespace name mapping.
@Test public void consumeServiceBusActiveMessagesAvailableWithNoListenersEvent() throws IOException { String jsonData = getTestPayloadFromFile("ServiceBusActiveMessagesAvailableWithNoListenersEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); ServiceBusActiveMessagesAvailableWithNoListenersEventData eventData = assertInstanceOf(ServiceBusActiveMessagesAvailableWithNoListenersEventData.class, toSystemEventData(events[0])); assertEquals("testns1", eventData.getNamespaceName()); }
@Test public void consumeServiceBusDeadletterMessagesAvailableWithNoListenersEvent() throws IOException { String jsonData = getTestPayloadFromFile("ServiceBusDeadletterMessagesAvailableWithNoListenersEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); ServiceBusDeadletterMessagesAvailableWithNoListenersEventData eventData = assertInstanceOf(ServiceBusDeadletterMessagesAvailableWithNoListenersEventData.class, toSystemEventData(events[0])); assertEquals("testns1", eventData.getNamespaceName()); }
// NOTE(review): the Storage tests below contain string literals truncated at "https:" — everything after the
// "//" of each URL (including, for the inline-payload tests, the rest of the JSON and the closing assertions)
// appears to have been stripped by a tool that treated "//" as a line comment. This text does not compile as
// shown; restore the original literals from upstream history before building — do not guess the URLs.
@Test public void consumeStorageBlobCreatedEvent() throws IOException { String jsonData = getTestPayloadFromFile("StorageBlobCreatedEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); StorageBlobCreatedEventData eventData = assertInstanceOf(StorageBlobCreatedEventData.class, toSystemEventData(events[0])); assertEquals("https: }
@Test public void consumeStorageBlobDeletedEvent() throws IOException { String jsonData = getTestPayloadFromFile("StorageBlobDeletedEvent.json"); EventGridEvent[] events = EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); StorageBlobDeletedEventData eventData = assertInstanceOf(StorageBlobDeletedEventData.class, toSystemEventData(events[0])); assertEquals("https: }
@Test public void consumeCloudEventStorageBlobRenamedEvent() { String jsonData = "[ { \"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Storage/storageAccounts/myaccount\", \"subject\": \"/blobServices/default/containers/testcontainer/blobs/testfile.txt\", \"type\": \"Microsoft.Storage.BlobRenamed\", \"time\": \"2017-08-16T01:57:26.005121Z\", \"id\": \"602a88ef-0001-00e6-1233-1646070610ea\", \"data\": { \"api\": \"RenameFile\", \"clientRequestId\": \"799304a4-bbc5-45b6-9849-ec2c66be800a\", \"requestId\": \"602a88ef-0001-00e6-1233-164607000000\", \"eTag\": \"0x8D4E44A24ABE7F1\", \"destinationUrl\": \"https: CloudEvent[] events = CloudEvent.fromString(jsonData).toArray(new CloudEvent[0]); assertNotNull(events); StorageBlobRenamedEventData eventData = assertInstanceOf(StorageBlobRenamedEventData.class, toSystemEventData(events[0])); assertEquals("https: }
@Test public void consumeStorageDirectoryCreatedEvent() { String requestContent = "[ { \"topic\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Storage/storageAccounts/myaccount\", \"subject\": \"/blobServices/default/containers/testcontainer/blobs/testDir\", \"eventType\": \"Microsoft.Storage.DirectoryCreated\", \"eventTime\": \"2017-08-16T01:57:26.005121Z\", \"id\": \"602a88ef-0001-00e6-1233-1646070610ea\", \"data\": { \"api\": \"CreateDirectory\", \"clientRequestId\": \"799304a4-bbc5-45b6-9849-ec2c66be800a\", \"requestId\": \"602a88ef-0001-00e6-1233-164607000000\", \"eTag\": \"0x8D4E44A24ABE7F1\", \"url\": \"https: List<EventGridEvent> events = EventGridEvent.fromString(requestContent); assertNotNull(events); StorageDirectoryCreatedEventData eventData = (StorageDirectoryCreatedEventData) toSystemEventData(events.get(0)); assertEquals("https: }
@Test public void consumeStorageDirectoryDeletedEvent() { String requestContent = "[{ \"topic\": \"/subscriptions/id/resourceGroups/Storage/providers/Microsoft.Storage/storageAccounts/xstoretestaccount\", \"subject\": \"/blobServices/default/containers/testcontainer/blobs/testDir\", \"eventType\": \"Microsoft.Storage.DirectoryDeleted\", \"eventTime\": \"2017-11-07T20:09:22.5674003Z\", \"id\": \"4c2359fe-001e-00ba-0e04-58586806d298\", \"data\": { \"api\": \"DeleteDirectory\", \"requestId\": \"4c2359fe-001e-00ba-0e04-585868000000\", \"url\": \"https: List<EventGridEvent> events = EventGridEvent.fromString(requestContent); assertNotNull(events); StorageDirectoryDeletedEventData eventData = (StorageDirectoryDeletedEventData) toSystemEventData(events.get(0)); assertEquals("https: }
@Test public void consumeStorageDirectoryRenamedEvent() { String requestContent = "[{ \"topic\": \"/subscriptions/id/resourceGroups/Storage/providers/Microsoft.Storage/storageAccounts/xstoretestaccount\", \"subject\": \"/blobServices/default/containers/testcontainer/blobs/testDir\", \"eventType\": \"Microsoft.Storage.DirectoryRenamed\", \"eventTime\": \"2017-11-07T20:09:22.5674003Z\", \"id\": \"4c2359fe-001e-00ba-0e04-58586806d298\", \"data\": { \"api\": \"RenameDirectory\", \"requestId\": \"4c2359fe-001e-00ba-0e04-585868000000\", \"destinationUrl\": \"https: List<EventGridEvent> events = EventGridEvent.fromString(requestContent); assertNotNull(events); StorageDirectoryRenamedEventData eventData = (StorageDirectoryRenamedEventData) toSystemEventData(events.get(0)); assertEquals("https: }
// consumeResourceWriteSuccessEvent continues on the next excerpt line (statement split mid-assignment).
@Test public void consumeResourceWriteSuccessEvent() throws IOException { String jsonData = getTestPayloadFromFile("ResourceWriteSuccessEvent.json"); EventGridEvent[] events = 
// --- tail of consumeResourceWriteSuccessEvent (assignment begins on the previous excerpt line) ---
EventGridEvent.fromString(jsonData).toArray(new EventGridEvent[0]); assertNotNull(events); ResourceWriteSuccessEventData eventData = assertInstanceOf(ResourceWriteSuccessEventData.class, toSystemEventData(events[0])); assertEquals("72f988bf-86f1-41af-91ab-2d7cd011db47", eventData.getTenantId()); }
// Machine Learning Services events: inline EventGridEvent payloads; verify model/run fields and map-typed tags.
@Test public void consumeMachineLearningServicesModelRegisteredEvent() { String requestContent = "[{\"topic\":\"/subscriptions/a5fe3bc5-98f0-4c84-affc-a589f54d9b23/resourceGroups/jenns/providers/Microsoft.MachineLearningServices/workspaces/jenns-canary\",\"eventType\":\"Microsoft.MachineLearningServices.ModelRegistered\",\"subject\":\"models/sklearn_regression_model:3\",\"eventTime\":\"2019-10-17T22:23:57.5350054+00:00\",\"id\":\"3b73ee51-bbf4-480d-9112-cfc23b41bfdb\",\"data\":{\"modelName\":\"sklearn_regression_model\",\"modelVersion\":\"3\",\"modelTags\":{\"area\":\"diabetes\",\"type\":\"regression\"},\"modelProperties\":{\"area\":\"test\"}},\"dataVersion\":\"2\",\"metadataVersion\":\"1\"}]"; List<EventGridEvent> events = EventGridEvent.fromString(requestContent); assertNotNull(events); MachineLearningServicesModelRegisteredEventData eventData = (MachineLearningServicesModelRegisteredEventData) toSystemEventData(events.get(0)); assertEquals("sklearn_regression_model", eventData.getModelName()); assertEquals("3", eventData.getModelVersion()); assertInstanceOf(Map.class, eventData.getModelTags()); assertEquals("regression", ((Map<?, ?>) eventData.getModelTags()).get("type")); assertInstanceOf(Map.class, eventData.getModelProperties()); assertEquals("test", ((Map<?, ?>) eventData.getModelProperties()).get("area")); }
// Model-deployed: modelIds is a comma-separated string; the test only checks it splits into two ids.
@Test public void consumeMachineLearningServicesModelDeployedEvent() { String requestContent = "[{\"topic\":\"/subscriptions/a5fe3bc5-98f0-4c84-affc-a589f54d9b23/resourceGroups/jenns/providers/Microsoft.MachineLearningServices/workspaces/jenns-canary\",\"eventType\":\"Microsoft.MachineLearningServices.ModelDeployed\",\"subject\":\"endpoints/aciservice1\",\"eventTime\":\"2019-10-23T18:20:08.8824474+00:00\",\"id\":\"40d0b167-be44-477b-9d23-a2befba7cde0\",\"data\":{\"serviceName\":\"aciservice1\",\"serviceComputeType\":\"ACI\",\"serviceTags\":{\"mytag\":\"test tag\"},\"serviceProperties\":{\"myprop\":\"test property\"},\"modelIds\":\"my_first_model:1,my_second_model:1\"},\"dataVersion\":\"2\",\"metadataVersion\":\"1\"}]"; List<EventGridEvent> events = EventGridEvent.fromString(requestContent); MachineLearningServicesModelDeployedEventData eventData = (MachineLearningServicesModelDeployedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals("aciservice1", eventData.getServiceName()); assertEquals(2, eventData.getModelIds().split(",").length); }
@Test public void consumeMachineLearningServicesRunCompletedEvent() { String requestContent = "[{\"topic\":\"/subscriptions/a5fe3bc5-98f0-4c84-affc-a589f54d9b23/resourceGroups/jenns/providers/Microsoft.MachineLearningServices/workspaces/jenns-canary\",\"eventType\":\"Microsoft.MachineLearningServices.RunCompleted\",\"subject\":\"experiments/0fa9dfaa-cba3-4fa7-b590-23e48548f5c1/runs/AutoML_ad912b2d-6467-4f32-a616-dbe4af6dd8fc\",\"eventTime\":\"2019-10-18T19:29:55.8856038+00:00\",\"id\":\"044ac44d-462c-4043-99eb-d9e01dc760ab\",\"data\":{\"experimentId\":\"0fa9dfaa-cba3-4fa7-b590-23e48548f5c1\",\"experimentName\":\"automl-local-regression\",\"runId\":\"AutoML_ad912b2d-6467-4f32-a616-dbe4af6dd8fc\",\"runType\":\"automl\",\"RunTags\":{\"experiment_status\":\"ModelSelection\",\"experiment_status_descr\":\"Beginning model selection.\"},\"runProperties\":{\"num_iterations\":\"10\",\"target\":\"local\"}},\"dataVersion\":\"2\",\"metadataVersion\":\"1\"}]"; List<EventGridEvent> events = EventGridEvent.fromString(requestContent); MachineLearningServicesRunCompletedEventData eventData = (MachineLearningServicesRunCompletedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals("AutoML_ad912b2d-6467-4f32-a616-dbe4af6dd8fc", eventData.getRunId()); assertEquals("automl-local-regression", eventData.getExperimentName()); }
@Test public void consumeMachineLearningServicesRunStatusChangedEvent() { String requestContent = "[{\"topic\":\"/subscriptions/a5fe3bc5-98f0-4c84-affc-a589f54d9b23/resourceGroups/jenns/providers/Microsoft.MachineLearningServices/workspaces/jenns-canary\",\"eventType\":\"Microsoft.MachineLearningServices.RunStatusChanged\",\"subject\":\"experiments/0fa9dfaa-cba3-4fa7-b590-23e48548f5c1/runs/AutoML_ad912b2d-6467-4f32-a616-dbe4af6dd8fc\",\"eventTime\":\"2020-03-09T23:53:04.4579724Z\",\"id\":\"aa8cd7df-fe28-5d5d-9b40-3342dbc2a887\",\"data\":{\"runStatus\": \"Running\",\"experimentId\":\"0fa9dfaa-cba3-4fa7-b590-23e48548f5c1\",\"experimentName\":\"automl-local-regression\",\"runId\":\"AutoML_ad912b2d-6467-4f32-a616-dbe4af6dd8fc\",\"runType\":\"automl\",\"runTags\":{\"experiment_status\":\"ModelSelection\",\"experiment_status_descr\":\"Beginning model selection.\"},\"runProperties\":{\"num_iterations\":\"10\",\"target\":\"local\"}},\"dataVersion\":\"2\",\"metadataVersion\":\"1\"}]"; List<EventGridEvent> events = EventGridEvent.fromString(requestContent); MachineLearningServicesRunStatusChangedEventData eventData = (MachineLearningServicesRunStatusChangedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals("AutoML_ad912b2d-6467-4f32-a616-dbe4af6dd8fc", eventData.getRunId()); assertEquals("automl-local-regression", eventData.getExperimentName()); assertEquals("Running", eventData.getRunStatus()); assertEquals("automl", eventData.getRunType()); }
// NOTE(review): the payload key "tarAsSystemEventDatasetId" below looks like a mangled "targetDatasetId"
// (a text substitution artifact) — confirm against the original fixture; the assertion does not read it.
@Test public void consumeMachineLearningServicesDatasetDriftDetectedEvent() { String requestContent = "[{\"topic\":\"/subscriptions/60582a10-b9fd-49f1-a546-c4194134bba8/resourceGroups/copetersRG/providers/Microsoft.MachineLearningServices/workspaces/driftDemoWS\",\"eventType\":\"Microsoft.MachineLearningServices.DatasetDriftDetected\",\"subject\":\"datadrift/01d29aa4-e6a4-470a-9ef3-66660d21f8ef/run/01d29aa4-e6a4-470a-9ef3-66660d21f8ef_1571590300380\",\"eventTime\":\"2019-10-20T17:08:08.467191+00:00\",\"id\":\"2684de79-b145-4dcf-ad2e-6a1db798585f\",\"data\":{\"dataDriftId\":\"01d29aa4-e6a4-470a-9ef3-66660d21f8ef\",\"dataDriftName\":\"copetersDriftMonitor3\",\"runId\":\"01d29aa4-e6a4-470a-9ef3-66660d21f8ef_1571590300380\",\"baseDatasetId\":\"3c56d136-0f64-4657-a0e8-5162089a88a3\",\"tarAsSystemEventDatasetId\":\"d7e74d2e-c972-4266-b5fb-6c9c182d2a74\",\"driftCoefficient\":0.8350349068479208,\"startTime\":\"2019-07-04T00:00:00+00:00\",\"endTime\":\"2019-07-05T00:00:00+00:00\"},\"dataVersion\":\"2\",\"metadataVersion\":\"1\"}]"; List<EventGridEvent> events = EventGridEvent.fromString(requestContent); assertNotNull(events); MachineLearningServicesDatasetDriftDetectedEventData eventData = (MachineLearningServicesDatasetDriftDetectedEventData) toSystemEventData(events.get(0)); assertEquals("copetersDriftMonitor3", eventData.getDataDriftName()); }
// consumeCloudEventWebAppUpdatedEvent continues on the next excerpt line (split inside the JSON literal).
@Test public void consumeCloudEventWebAppUpdatedEvent() { String siteName = "testSite01"; String requestContent = "[{\"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/sites/testSite01\", \"subject\": \"/Microsoft.Web/sites/testSite01\",\"type\": \"Microsoft.Web.AppUpdated\", \"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appEventTypeDetail\": { \"action\": \"Restarted\"},\"name\": \"testSite01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": 
// --- tail of consumeCloudEventWebAppUpdatedEvent (JSON literal begins on the previous excerpt line) ---
\"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"},\"specversion\": \"1.0\"}]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); WebAppUpdatedEventData eventData = (WebAppUpdatedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(siteName, eventData.getName()); }
// Web backup/restore operation events (CloudEvent schema): all share one payload shape and check the site name.
@Test public void consumeCloudEventWebBackupOperationStartedEvent() { String siteName = "testSite01"; String requestContent = "[{\"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/sites/testSite01\", \"subject\": \"/Microsoft.Web/sites/testSite01\",\"type\": \"Microsoft.Web.BackupOperationStarted\", \"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appEventTypeDetail\": { \"action\": \"Restarted\"},\"name\": \"testSite01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": \"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"},\"specversion\": \"1.0\"}]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); WebBackupOperationStartedEventData eventData = (WebBackupOperationStartedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(siteName, eventData.getName()); }
@Test public void consumeCloudEventWebBackupOperationCompletedEvent() { String siteName = "testSite01"; String requestContent = "[{\"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/sites/testSite01\", \"subject\": \"/Microsoft.Web/sites/testSite01\",\"type\": \"Microsoft.Web.BackupOperationCompleted\", \"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appEventTypeDetail\": { \"action\": \"Restarted\"},\"name\": \"testSite01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": \"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"},\"specversion\": \"1.0\"}]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); WebBackupOperationCompletedEventData eventData = (WebBackupOperationCompletedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(siteName, eventData.getName()); }
@Test public void consumeCloudEventWebBackupOperationFailedEvent() { String siteName = "testSite01"; String requestContent = "[{\"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/sites/testSite01\", \"subject\": \"/Microsoft.Web/sites/testSite01\",\"type\": \"Microsoft.Web.BackupOperationFailed\", \"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appEventTypeDetail\": { \"action\": \"Restarted\"},\"name\": \"testSite01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": \"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"},\"specversion\": \"1.0\"}]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); WebBackupOperationFailedEventData eventData = (WebBackupOperationFailedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(siteName, eventData.getName()); }
@Test public void consumeCloudEventWebRestoreOperationStartedEvent() { String siteName = "testSite01"; String requestContent = "[{\"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/sites/testSite01\", \"subject\": \"/Microsoft.Web/sites/testSite01\",\"type\": \"Microsoft.Web.RestoreOperationStarted\", \"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appEventTypeDetail\": { \"action\": \"Restarted\"},\"name\": \"testSite01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": \"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"},\"specversion\": \"1.0\"}]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); WebRestoreOperationStartedEventData eventData = (WebRestoreOperationStartedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(siteName, eventData.getName()); }
@Test public void consumeCloudEventWebRestoreOperationCompletedEvent() { String siteName = "testSite01"; String requestContent = "[{\"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/sites/testSite01\", \"subject\": \"/Microsoft.Web/sites/testSite01\",\"type\": \"Microsoft.Web.RestoreOperationCompleted\", \"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appEventTypeDetail\": { \"action\": \"Restarted\"},\"name\": \"testSite01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": \"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"},\"specversion\": \"1.0\"}]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); WebRestoreOperationCompletedEventData eventData = (WebRestoreOperationCompletedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(siteName, eventData.getName()); }
@Test public void consumeCloudEventWebRestoreOperationFailedEvent() { String siteName = "testSite01"; String requestContent = "[{\"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/sites/testSite01\", \"subject\": \"/Microsoft.Web/sites/testSite01\",\"type\": \"Microsoft.Web.RestoreOperationFailed\", \"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appEventTypeDetail\": { \"action\": \"Restarted\"},\"name\": \"testSite01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": \"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"},\"specversion\": \"1.0\"}]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); WebRestoreOperationFailedEventData eventData = (WebRestoreOperationFailedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(siteName, eventData.getName()); }
// consumeCloudEventWebSlotSwapStartedEvent continues on the next excerpt line (split inside the JSON literal).
@Test public void consumeCloudEventWebSlotSwapStartedEvent() { String siteName = "testSite01"; String requestContent = "[{\"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/sites/testSite01\", \"subject\": \"/Microsoft.Web/sites/testSite01\",\"type\": 
// --- tail of consumeCloudEventWebSlotSwapStartedEvent (JSON literal begins on the previous excerpt line) ---
// Unlike its siblings this test calls the two-argument CloudEvent.fromString(content, false) overload
// (the second argument is passed as false here; its semantics are not visible in this excerpt).
\"Microsoft.Web.SlotSwapStarted\", \"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appEventTypeDetail\": { \"action\": \"Restarted\"},\"name\": \"testSite01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": \"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"},\"specversion\": \"1.0\"}]"; List<CloudEvent> events = CloudEvent.fromString(requestContent, false); WebSlotSwapStartedEventData eventData = (WebSlotSwapStartedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(siteName, eventData.getName()); }
// Slot-swap lifecycle events: same payload shape, "specversion" placement varies between payloads on purpose.
@Test public void consumeCloudEventWebSlotSwapCompletedEvent() { String siteName = "testSite01"; String requestContent = "[{\"specversion\": \"1.0\", \"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/sites/testSite01\", \"subject\": \"/Microsoft.Web/sites/testSite01\",\"type\": \"Microsoft.Web.SlotSwapCompleted\", \"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appEventTypeDetail\": { \"action\": \"Restarted\"},\"name\": \"testSite01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": \"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"}}]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); WebSlotSwapCompletedEventData eventData = (WebSlotSwapCompletedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(siteName, eventData.getName()); }
@Test public void consumeCloudEventWebSlotSwapFailedEvent() { String siteName = "testSite01"; String requestContent = "[{\"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/sites/testSite01\", \"subject\": \"/Microsoft.Web/sites/testSite01\",\"type\": \"Microsoft.Web.SlotSwapFailed\", \"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appEventTypeDetail\": { \"action\": \"Restarted\"},\"name\": \"testSite01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": \"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"}, \"specversion\": \"1.0\"}]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); WebSlotSwapFailedEventData eventData = (WebSlotSwapFailedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(siteName, eventData.getName()); }
@Test public void consumeCloudEventWebSlotSwapWithPreviewStartedEvent() { String siteName = "testSite01"; String requestContent = "[{\"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/sites/testSite01\", \"subject\": \"/Microsoft.Web/sites/testSite01\",\"type\": \"Microsoft.Web.SlotSwapWithPreviewStarted\", \"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appEventTypeDetail\": { \"action\": \"Restarted\"},\"name\": \"testSite01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": \"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"}, \"specversion\": \"1.0\"}]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); WebSlotSwapWithPreviewStartedEventData eventData = (WebSlotSwapWithPreviewStartedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(siteName, eventData.getName()); }
@Test public void consumeCloudEventWebSlotSwapWithPreviewCancelledEvent() { String siteName = "testSite01"; String requestContent = "[{\"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/sites/testSite01\", \"subject\": \"/Microsoft.Web/sites/testSite01\",\"type\": \"Microsoft.Web.SlotSwapWithPreviewCancelled\", \"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appEventTypeDetail\": { \"action\": \"Restarted\"},\"name\": \"testSite01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": \"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"}, \"specversion\": \"1.0\"}]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); WebSlotSwapWithPreviewCancelledEventData eventData = (WebSlotSwapWithPreviewCancelledEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(siteName, eventData.getName()); }
// NOTE(review): the payload below contains "specversion" twice ("...\"specversion\": \"1.0\",\"specversion\": \"1.0\"");
// duplicate JSON keys are almost certainly unintentional — confirm against the original fixture.
@Test public void consumeCloudEventWebAppServicePlanUpdatedEvent() { String planName = "testPlan01"; String requestContent = "[{\"source\": \"/subscriptions/319a9601-1ec0-0000-aebc-8fe82724c81e/resourceGroups/testrg/providers/Microsoft.Web/serverfarms/testPlan01\", \"subject\": \"/Microsoft.Web/serverfarms/testPlan01\",\"type\": \"Microsoft.Web.AppServicePlanUpdated\", \"time\": \"2017-08-16T01:57:26.005121Z\",\"id\": \"602a88ef-0001-00e6-1233-1646070610ea\",\"data\": { \"appServicePlanEventTypeDetail\": { \"stampKind\": \"Public\",\"action\": \"Updated\",\"status\": \"Started\" },\"name\": \"testPlan01\",\"clientRequestId\": \"ce636635-2b81-4981-a9d4-cec28fb5b014\",\"correlationRequestId\": \"61baa426-c91f-4e58-b9c6-d3852c4d88d\",\"requestId\": \"0a4d5b5e-7147-482f-8e21-4219aaacf62a\",\"address\": \"/subscriptions/ef90e930-9d7f-4a60-8a99-748e0eea69de/resourcegroups/egcanarytest/providers/Microsoft.Web/sites/egtestapp/restart?api-version=2016-03-01\",\"verb\": \"POST\"},\"specversion\": \"1.0\",\"specversion\": \"1.0\"}]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); WebAppServicePlanUpdatedEventData eventData = (WebAppServicePlanUpdatedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(planName, eventData.getName()); }
// FHIR resource events (EventGrid schema): verify resource type enum, service host, id and version mapping.
// consumeFhirResourceCreatedEvent continues on the next excerpt line.
@Test public void consumeFhirResourceCreatedEvent() { String requestContent = "[ { \"subject\":\"{fhir-account}.fhir.azurehealthcareapis.com/Patient/e0a1f743-1a70-451f-830e-e96477163902\", \"eventType\":\"Microsoft.HealthcareApis.FhirResourceCreated\", \"eventTime\":\"2017-08-16T03:54:38.2696833Z\", \"id\":\"25b3b0d0-d79b-44d5-9963-440d4e6a9bba\", \"data\": { \"resourceType\": \"Patient\", \"resourceFhirAccount\": \"{fhir-account}.fhir.azurehealthcareapis.com\", \"resourceFhirId\": \"e0a1f743-1a70-451f-830e-e96477163902\", \"resourceVersionId\": 1 }, \"dataVersion\": \"1.0\" }]"; List<EventGridEvent> events = EventGridEvent.fromString(requestContent); assertNotNull(events); HealthcareFhirResourceCreatedEventData eventData = (HealthcareFhirResourceCreatedEventData) toSystemEventData(events.get(0)); assertEquals(HealthcareFhirResourceType.PATIENT, eventData.getFhirResourceType()); assertEquals("{fhir-account}.fhir.azurehealthcareapis.com", eventData.getFhirServiceHostName()); 
// --- tail of consumeFhirResourceCreatedEvent (declaration is on the previous excerpt line) ---
assertEquals("e0a1f743-1a70-451f-830e-e96477163902", eventData.getFhirResourceId()); assertEquals(1, eventData.getFhirResourceVersionId()); }
@Test public void consumeFhirResourceUpdatedEvent() { String requestContent = "[ { \"subject\":\"{fhir-account}.fhir.azurehealthcareapis.com/Patient/e0a1f743-1a70-451f-830e-e96477163902\", \"eventType\":\"Microsoft.HealthcareApis.FhirResourceUpdated\", \"eventTime\":\"2017-08-16T03:54:38.2696833Z\", \"id\":\"25b3b0d0-d79b-44d5-9963-440d4e6a9bba\", \"data\": { \"resourceType\": \"Patient\", \"resourceFhirAccount\": \"{fhir-account}.fhir.azurehealthcareapis.com\", \"resourceFhirId\": \"e0a1f743-1a70-451f-830e-e96477163902\", \"resourceVersionId\": 1 }, \"dataVersion\": \"1.0\" }]"; List<EventGridEvent> events = EventGridEvent.fromString(requestContent); assertNotNull(events); HealthcareFhirResourceUpdatedEventData eventData = (HealthcareFhirResourceUpdatedEventData) toSystemEventData(events.get(0)); assertEquals(HealthcareFhirResourceType.PATIENT, eventData.getFhirResourceType()); assertEquals("{fhir-account}.fhir.azurehealthcareapis.com", eventData.getFhirServiceHostName()); assertEquals("e0a1f743-1a70-451f-830e-e96477163902", eventData.getFhirResourceId()); assertEquals(1, eventData.getFhirResourceVersionId()); }
@Test public void consumeFhirResourceDeletedEvent() { String requestContent = "[ { \"subject\":\"{fhir-account}.fhir.azurehealthcareapis.com/Patient/e0a1f743-1a70-451f-830e-e96477163902\", \"eventType\":\"Microsoft.HealthcareApis.FhirResourceDeleted\", \"eventTime\":\"2017-08-16T03:54:38.2696833Z\", \"id\":\"25b3b0d0-d79b-44d5-9963-440d4e6a9bba\", \"data\": { \"resourceType\": \"Patient\", \"resourceFhirAccount\": \"{fhir-account}.fhir.azurehealthcareapis.com\", \"resourceFhirId\": \"e0a1f743-1a70-451f-830e-e96477163902\", \"resourceVersionId\": 1 }, \"dataVersion\": \"1.0\" }]"; List<EventGridEvent> events = EventGridEvent.fromString(requestContent); assertNotNull(events); HealthcareFhirResourceDeletedEventData eventData = (HealthcareFhirResourceDeletedEventData) toSystemEventData(events.get(0)); assertEquals(HealthcareFhirResourceType.PATIENT, eventData.getFhirResourceType()); assertEquals("{fhir-account}.fhir.azurehealthcareapis.com", eventData.getFhirServiceHostName()); assertEquals("e0a1f743-1a70-451f-830e-e96477163902", eventData.getFhirResourceId()); assertEquals(1, eventData.getFhirResourceVersionId()); }
// Same FHIR events delivered in the CloudEvent 1.0 envelope (source/type/specversion instead of topic/eventType).
@Test public void consumeCloudEventFhirResourceCreatedEvent() { String requestContent = "[ { \"source\": \"/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/Microsoft.HealthcareApis/workspaces/{workspace-name}\", \"subject\":\"{fhir-account}.fhir.azurehealthcareapis.com/Patient/e0a1f743-1a70-451f-830e-e96477163902\", \"type\":\"Microsoft.HealthcareApis.FhirResourceCreated\", \"time\":\"2017-08-16T03:54:38.2696833Z\", \"id\":\"25b3b0d0-d79b-44d5-9963-440d4e6a9bba\", \"data\": { \"resourceType\": \"Patient\", \"resourceFhirAccount\": \"{fhir-account}.fhir.azurehealthcareapis.com\", \"resourceFhirId\": \"e0a1f743-1a70-451f-830e-e96477163902\", \"resourceVersionId\": 1 }, \"specversion\": \"1.0\" }]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); HealthcareFhirResourceCreatedEventData eventData = (HealthcareFhirResourceCreatedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(HealthcareFhirResourceType.PATIENT, eventData.getFhirResourceType()); assertEquals("{fhir-account}.fhir.azurehealthcareapis.com", eventData.getFhirServiceHostName()); assertEquals("e0a1f743-1a70-451f-830e-e96477163902", eventData.getFhirResourceId()); assertEquals(1, eventData.getFhirResourceVersionId()); }
@Test public void consumeCloudEventFhirResourceUpdatedEvent() { String requestContent = "[ { \"source\": \"/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/Microsoft.HealthcareApis/workspaces/{workspace-name}\", \"subject\":\"{fhir-account}.fhir.azurehealthcareapis.com/Patient/e0a1f743-1a70-451f-830e-e96477163902\", \"type\":\"Microsoft.HealthcareApis.FhirResourceUpdated\", \"time\":\"2017-08-16T03:54:38.2696833Z\", \"id\":\"25b3b0d0-d79b-44d5-9963-440d4e6a9bba\", \"data\": { \"resourceType\": \"Patient\", \"resourceFhirAccount\": \"{fhir-account}.fhir.azurehealthcareapis.com\", \"resourceFhirId\": \"e0a1f743-1a70-451f-830e-e96477163902\", \"resourceVersionId\": 1 }, \"specversion\": \"1.0\" }]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); HealthcareFhirResourceUpdatedEventData eventData = (HealthcareFhirResourceUpdatedEventData) toSystemEventData(events.get(0)); assertNotNull(events); assertEquals(HealthcareFhirResourceType.PATIENT, eventData.getFhirResourceType()); assertEquals("{fhir-account}.fhir.azurehealthcareapis.com", eventData.getFhirServiceHostName()); assertEquals("e0a1f743-1a70-451f-830e-e96477163902", eventData.getFhirResourceId()); assertEquals(1, eventData.getFhirResourceVersionId()); }
// consumeCloudEventFhirResourceDeletedEvent runs past the end of this excerpt; its remaining assertions are below.
@Test public void consumeCloudEventFhirResourceDeletedEvent() { String requestContent = "[ { \"source\": \"/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/Microsoft.HealthcareApis/workspaces/{workspace-name}\", \"subject\":\"{fhir-account}.fhir.azurehealthcareapis.com/Patient/e0a1f743-1a70-451f-830e-e96477163902\", \"type\":\"Microsoft.HealthcareApis.FhirResourceDeleted\", \"time\":\"2017-08-16T03:54:38.2696833Z\", \"id\":\"25b3b0d0-d79b-44d5-9963-440d4e6a9bba\", \"data\": { \"resourceType\": \"Patient\", \"resourceFhirAccount\": \"{fhir-account}.fhir.azurehealthcareapis.com\", \"resourceFhirId\": \"e0a1f743-1a70-451f-830e-e96477163902\", \"resourceVersionId\": 1 }, \"specversion\": \"1.0\" }]"; List<CloudEvent> events = CloudEvent.fromString(requestContent); HealthcareFhirResourceDeletedEventData eventData = (HealthcareFhirResourceDeletedEventData) toSystemEventData(events.get(0)); assertNotNull(events); 
assertEquals(HealthcareFhirResourceType.PATIENT, eventData.getFhirResourceType()); assertEquals("{fhir-account}.fhir.azurehealthcareapis.com", eventData.getFhirServiceHostName()); assertEquals("e0a1f743-1a70-451f-830e-e96477163902", eventData.getFhirResourceId()); assertEquals(1, eventData.getFhirResourceVersionId()); } @Test public void verifyAcsRouterJobClassificationFailedEventDataErrors() { ResponseError error = new ResponseError("InvalidRequest", "The request is invalid"); AcsRouterJobClassificationFailedEventData eventData = new AcsRouterJobClassificationFailedEventData(); eventData.setErrors(Collections.singletonList(error)); List<ResponseError> errors = eventData.getErrors(); assertEquals(1, errors.size()); assertEquals("InvalidRequest", errors.get(0).getCode()); assertEquals("The request is invalid", errors.get(0).getMessage()); } private String getTestPayloadFromFile(String fileName) throws IOException { ClassLoader classLoader = getClass().getClassLoader(); try (InputStream inputStream = classLoader.getResourceAsStream("customization/" + fileName)) { byte[] bytes = new byte[inputStream.available()]; inputStream.read(bytes); return new String(bytes); } } }
wdyt about warning?
private void logCPUResourcesConcurrencyMismatch() { final int cores = Runtime.getRuntime().availableProcessors(); final int poolSize = DEFAULT_BOUNDED_ELASTIC_SIZE; final int concurrency = maxConcurrentSessions * concurrencyPerSession; if (concurrencyPerSession > poolSize || concurrency > CONCURRENCY_PER_CORE * cores) { final String message = concurrency + " (ConcurrentSessions=" + maxConcurrentSessions + ", ConcurrencyPerSession=" + concurrencyPerSession + ")"; logger.atInfo().log(CORES_VS_CONCURRENCY_MESSAGE, poolSize, cores, message); } }
logger.atInfo().log(CORES_VS_CONCURRENCY_MESSAGE, poolSize, cores, message);
private void logCPUResourcesConcurrencyMismatch() { final int cores = Runtime.getRuntime().availableProcessors(); final int poolSize = DEFAULT_BOUNDED_ELASTIC_SIZE; final int concurrency = maxConcurrentSessions * concurrencyPerSession; if (concurrencyPerSession > poolSize || concurrency > CONCURRENCY_PER_CORE * cores) { final String message = concurrency + " (ConcurrentSessions=" + maxConcurrentSessions + ", ConcurrencyPerSession=" + concurrencyPerSession + ")"; logger.atWarning().log(CORES_VS_CONCURRENCY_MESSAGE, poolSize, cores, message); } }
class SessionsMessagePump { private static final AtomicLong COUNTER = new AtomicLong(); private static final ArrayList<RollingSessionReceiver> EMPTY = new ArrayList<>(0); private static final ArrayList<RollingSessionReceiver> TERMINATED = new ArrayList<>(0); private static final Duration CONNECTION_STATE_POLL_INTERVAL = Duration.ofSeconds(20); private final long pumpId; private final ClientLogger logger; private final String identifier; private final String fullyQualifiedNamespace; private final String entityPath; private final ServiceBusReceiverInstrumentation instrumentation; private final ServiceBusSessionAcquirer sessionAcquirer; private final Duration maxSessionLockRenew; private final Duration sessionIdleTimeout; private final int maxConcurrentSessions; private final int concurrencyPerSession; private final int prefetch; private final boolean enableAutoDisposition; private final MessageSerializer serializer; private final AmqpRetryPolicy retryPolicy; private final Consumer<ServiceBusReceivedMessageContext> processMessage; private final Consumer<ServiceBusErrorContext> processError; private final Runnable onTerminate; private final AtomicReference<List<RollingSessionReceiver>> rollingReceiversRef = new AtomicReference<>(EMPTY); private final SessionReceiversTracker receiversTracker; private final Mono<ServiceBusSessionAcquirer.Session> nextSession; SessionsMessagePump(String identifier, String fullyQualifiedNamespace, String entityPath, ServiceBusReceiveMode receiveMode, ServiceBusReceiverInstrumentation instrumentation, ServiceBusSessionAcquirer sessionAcquirer, Duration maxSessionLockRenew, Duration sessionIdleTimeout, int maxConcurrentSessions, int concurrencyPerSession, int prefetch, boolean enableAutoDisposition, MessageSerializer serializer, AmqpRetryPolicy retryPolicy, Consumer<ServiceBusReceivedMessageContext> processMessage, Consumer<ServiceBusErrorContext> processError, Runnable onTerminate) { this.pumpId = COUNTER.incrementAndGet(); final 
Map<String, Object> loggingContext = new HashMap<>(3); loggingContext.put(PUMP_ID_KEY, pumpId); loggingContext.put(FULLY_QUALIFIED_NAMESPACE_KEY, fullyQualifiedNamespace); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(SessionsMessagePump.class, loggingContext); this.identifier = identifier; this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); Objects.requireNonNull(receiveMode, "'receiveMode' cannot be null."); this.instrumentation = Objects.requireNonNull(instrumentation, "'instrumentation' cannot be null"); this.sessionAcquirer = Objects.requireNonNull(sessionAcquirer, "'sessionAcquirer' cannot be null"); this.maxSessionLockRenew = Objects.requireNonNull(maxSessionLockRenew, "'maxSessionLockRenew' cannot be null."); this.sessionIdleTimeout = sessionIdleTimeout != null ? sessionIdleTimeout : retryPolicy.getRetryOptions().getTryTimeout(); this.maxConcurrentSessions = maxConcurrentSessions; this.concurrencyPerSession = concurrencyPerSession; this.prefetch = prefetch; this.enableAutoDisposition = enableAutoDisposition; this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null."); this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null."); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null."); this.onTerminate = Objects.requireNonNull(onTerminate, "'onTerminate' cannot be null."); this.receiversTracker = new SessionReceiversTracker(logger, maxConcurrentSessions, fullyQualifiedNamespace, entityPath, receiveMode, instrumentation); this.nextSession = new NextSession(pumpId, fullyQualifiedNamespace, entityPath, sessionAcquirer).mono(); } String getIdentifier() { return identifier; } /** * Obtain a 
Mono that when subscribed, start pumping messages from {@code maxConcurrentSessions} sessions. * The Mono emits terminal signal once there is a failure in obtaining a new session. * * <p>The Mono emits {@link UnsupportedOperationException} if it is subscribed more than once. If the Mono is subscribed * after the termination of SessionsMessagePump it emits {@link MessagePumpTerminatedException}.</p> * * @return the Mono to begin and cancel message pumping. */ Mono<Void> begin() { logCPUResourcesConcurrencyMismatch(); final Mono<List<RollingSessionReceiver>> createReceiversMono = Mono.fromSupplier(() -> { throwIfTerminatedOrInitialized(); final List<RollingSessionReceiver> rollingReceivers = createRollingSessionReceivers(); if (!rollingReceiversRef.compareAndSet(EMPTY, rollingReceivers)) { rollingReceivers.clear(); throwIfTerminatedOrInitialized(); } return rollingReceivers; }); final Function<List<RollingSessionReceiver>, Mono<Void>> pumpFromReceiversMono = rollingReceivers -> { final List<Mono<Void>> pumpingList = new ArrayList<>(rollingReceivers.size()); for (RollingSessionReceiver rollingReceiver : rollingReceivers) { pumpingList.add(rollingReceiver.begin()); } final Mono<Void> terminatePumping = pollConnectionState(); final Mono<Void> pumping = Mono.when(pumpingList); return Mono.firstWithSignal(terminatePumping, pumping); }; final Mono<Void> pumpingMessages = Mono.usingWhen(createReceiversMono, pumpFromReceiversMono, (__) -> terminate(TerminalSignalType.COMPLETED), (__, e) -> terminate(TerminalSignalType.ERRORED), (__) -> terminate(TerminalSignalType.CANCELED)); return pumpingMessages .onErrorMap(e -> { if (e instanceof MessagePumpTerminatedException) { return e; } return new MessagePumpTerminatedException(pumpId, fullyQualifiedNamespace, entityPath, "pumping }) .then(Mono.error(() -> MessagePumpTerminatedException.forCompletion(pumpId, fullyQualifiedNamespace, entityPath))); } private Mono<Void> pollConnectionState() { return 
Flux.interval(CONNECTION_STATE_POLL_INTERVAL) .handle((ignored, sink) -> { if (sessionAcquirer.isConnectionClosed()) { final RuntimeException e = logger.atInfo() .log(new MessagePumpTerminatedException(pumpId, fullyQualifiedNamespace, entityPath, "session sink.error(e); } }).then(); } private Mono<Void> terminate(TerminalSignalType signalType) { final List<RollingSessionReceiver> rollingReceivers = rollingReceiversRef.getAndSet(TERMINATED); if (rollingReceivers == TERMINATED) { return Mono.empty(); } logger.atInfo().log("Pump terminated. signal:" + signalType); receiversTracker.clear(); onTerminate.run(); return Mono.empty(); } private List<RollingSessionReceiver> createRollingSessionReceivers() { final ArrayList<RollingSessionReceiver> rollingReceivers = new ArrayList<>(maxConcurrentSessions); for (int rollerId = 1; rollerId <= maxConcurrentSessions; rollerId++) { final RollingSessionReceiver rollingReceiver = new RollingSessionReceiver(pumpId, rollerId, instrumentation, fullyQualifiedNamespace, entityPath, nextSession, maxSessionLockRenew, sessionIdleTimeout, concurrencyPerSession, prefetch, enableAutoDisposition, serializer, retryPolicy, processMessage, processError, receiversTracker); rollingReceivers.add(rollingReceiver); } return rollingReceivers; } private void throwIfTerminatedOrInitialized() { final List<RollingSessionReceiver> l = rollingReceiversRef.get(); if (l == TERMINATED) { throw logger.atVerbose().log(new IllegalStateException("Cannot invoke begin() once terminated.")); } if (l != EMPTY) { throw logger.atVerbose().log(new IllegalStateException("Cannot invoke begin() more than once.")); } } /** * The type which provides a Mono {@link NextSession * All the {@link RollingSessionReceiver} in the {@link SessionsMessagePump} shares this Mono to obtain unique sessions. 
* * <p>The event the Mono fails to acquire a session, the type marks itself as terminated (i.e., self-terminate) and * notifies the subscription about the failure as {@link MessagePumpTerminatedException}. Any later subscriptions will * be notified with MessagePumpTerminatedException.</p> * * <p>If a RollingSessionReceiver encounters acquire session failure, it stops pumping and emit terminal signal. * At this point, the SessionMessagePump will cancel all other RollingSessionReceiver instances. The design of * the SessionMessagePump is to stop pumping from all sessions once any of the RollingSessionReceiver emits terminal * signal. The self-terminating nature of the shared {@link NextSession * more of the RollingSessionReceiver attempting session acquire when SessionMessagePump is about to or in progress * of canceling those.</p> */ private static final class NextSession implements Supplier<Mono<ServiceBusSessionAcquirer.Session>> { private final AtomicReference<Boolean> isTerminated = new AtomicReference<>(false); private final long pumpId; private final String fullyQualifiedNamespace; private final String entityPath; private final ServiceBusSessionAcquirer sessionAcquirer; NextSession(long pumpId, String fullyQualifiedNamespace, String entityPath, ServiceBusSessionAcquirer sessionAcquirer) { this.pumpId = pumpId; this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.entityPath = entityPath; this.sessionAcquirer = sessionAcquirer; } Mono<ServiceBusSessionAcquirer.Session> mono() { final Supplier<Mono<ServiceBusSessionAcquirer.Session>> supplier = this; return Mono.defer(supplier); } @Override public Mono<ServiceBusSessionAcquirer.Session> get() { if (isTerminated.get()) { return Mono.error(new MessagePumpTerminatedException(pumpId, fullyQualifiedNamespace, entityPath, "session } return sessionAcquirer.acquire() .onErrorMap(e -> { isTerminated.set(true); return new MessagePumpTerminatedException(pumpId, fullyQualifiedNamespace, entityPath, "session }); } } 
/** * A type that is responsible for managing a session, concurrently (with parallelism equal to {@code concurrencyPerSession}) * pumping messages from the session and rolling to the next session when current session terminates. */ private static final class RollingSessionReceiver extends AtomicReference<State<ServiceBusSessionReactorReceiver>> { private static final String ROLLER_ID_KEY = "roller-id"; private static final State<ServiceBusSessionReactorReceiver> INIT = State.init(); private static final State<ServiceBusSessionReactorReceiver> TERMINATED = State.terminated(); private final ClientLogger logger; private final long pumpId; private final int rollerId; private final String fullyQualifiedNamespace; private final String entityPath; private final int concurrency; private final Consumer<ServiceBusReceivedMessageContext> processMessage; private final Consumer<ServiceBusErrorContext> processError; private final boolean enableAutoDisposition; private final Duration maxSessionLockRenew; private final Duration sessionIdleTimeout; private final MessageSerializer serializer; private final ServiceBusReceiverInstrumentation instrumentation; private final ServiceBusTracer tracer; private final SessionReceiversTracker receiversTracker; private final NextSessionStream nextSessionStream; private final MessageFlux messageFlux; RollingSessionReceiver(long pumpId, int rollerId, ServiceBusReceiverInstrumentation instrumentation, String fullyQualifiedNamespace, String entityPath, Mono<ServiceBusSessionAcquirer.Session> nextSession, Duration maxSessionLockRenew, Duration sessionIdleTimeout, int concurrency, int prefetch, boolean enableAutoDisposition, MessageSerializer serializer, AmqpRetryPolicy retryPolicy, Consumer<ServiceBusReceivedMessageContext> processMessage, Consumer<ServiceBusErrorContext> processError, SessionReceiversTracker receiversTracker) { super(INIT); this.pumpId = pumpId; final Map<String, Object> loggingContext = new HashMap<>(3); 
loggingContext.put(ROLLER_ID_KEY, rollerId); loggingContext.put(FULLY_QUALIFIED_NAMESPACE_KEY, fullyQualifiedNamespace); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(RollingSessionReceiver.class, loggingContext); this.rollerId = rollerId; this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.entityPath = entityPath; this.concurrency = concurrency; this.processError = processError; this.processMessage = processMessage; this.enableAutoDisposition = enableAutoDisposition; this.maxSessionLockRenew = maxSessionLockRenew; this.sessionIdleTimeout = sessionIdleTimeout; this.serializer = serializer; this.instrumentation = instrumentation; this.tracer = instrumentation.getTracer(); this.receiversTracker = receiversTracker; this.nextSessionStream = new NextSessionStream(pumpId, rollerId, fullyQualifiedNamespace, entityPath, nextSession); final Flux<ServiceBusSessionReactorReceiver> nextSessionReceiverStream = nextSessionStream.flux() .map(this::nextSessionReceiver); this.messageFlux = new MessageFlux(nextSessionReceiverStream, prefetch, CreditFlowMode.RequestDriven, retryPolicy); } Mono<Void> begin() { return Mono.usingWhen( Mono.fromSupplier(() -> { final Scheduler workerScheduler; if (concurrency > 1) { workerScheduler = Schedulers.newBoundedElastic( DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "rolling-session-receiver-" + rollerId); } else { workerScheduler = Schedulers.immediate(); } return workerScheduler; }), workerScheduler -> { final RunOnWorker handleMessageOnWorker = new RunOnWorker(this::handleMessage, workerScheduler); return messageFlux.flatMap(handleMessageOnWorker, concurrency, 1).then(); }, (workerScheduler) -> terminate(TerminalSignalType.COMPLETED, workerScheduler), (workerScheduler, e) -> terminate(TerminalSignalType.ERRORED, workerScheduler), (workerScheduler) -> terminate(TerminalSignalType.CANCELED, workerScheduler) ); } private Mono<Void> terminate(TerminalSignalType signalType, Scheduler 
workerScheduler) { final State<ServiceBusSessionReactorReceiver> state = super.getAndSet(TERMINATED); if (state == TERMINATED) { return Mono.empty(); } logger.atInfo().log("Roller terminated. rollerId:" + rollerId + " signal:" + signalType); nextSessionStream.close(); workerScheduler.dispose(); return Mono.empty(); } private ServiceBusSessionReactorReceiver nextSessionReceiver(ServiceBusSessionAcquirer.Session nextSession) { final State<ServiceBusSessionReactorReceiver> lastState = super.get(); if (lastState == TERMINATED) { nextSession.getLink().closeAsync().subscribe(); throw new MessagePumpTerminatedException(pumpId, fullyQualifiedNamespace, entityPath, "session } final ServiceBusSessionReactorReceiver nextSessionReceiver = new ServiceBusSessionReactorReceiver(logger, tracer, nextSession, sessionIdleTimeout, maxSessionLockRenew); if (!super.compareAndSet(lastState, new State<>(nextSessionReceiver))) { nextSessionReceiver.closeAsync().subscribe(); throw new MessagePumpTerminatedException(pumpId, fullyQualifiedNamespace, entityPath, "session } if (lastState != INIT) { final ServiceBusSessionReactorReceiver lastSessionReceiver = lastState.receiver; receiversTracker.untrack(lastSessionReceiver); } receiversTracker.track(nextSessionReceiver); return nextSessionReceiver; } private void handleMessage(Message qpidMessage) { final ServiceBusReceivedMessage message = serializer.deserialize(qpidMessage, ServiceBusReceivedMessage.class); instrumentation.instrumentProcess(message, ReceiverKind.PROCESSOR, msg -> { logger.atVerbose() .addKeyValue(SESSION_ID_KEY, message.getSessionId()) .addKeyValue(MESSAGE_ID_LOGGING_KEY, message.getMessageId()) .log("Received message."); final Throwable error = notifyMessage(msg); if (enableAutoDisposition) { if (error == null) { complete(msg); } else { abandon(msg); } } return error; }); } private Throwable notifyMessage(ServiceBusReceivedMessage message) { try { processMessage.accept( new ServiceBusReceivedMessageContext(receiversTracker, 
new ServiceBusMessageContext(message))); } catch (Exception e) { notifyError(new ServiceBusException(e, ServiceBusErrorSource.USER_CALLBACK)); return e; } return null; } private void notifyError(Throwable throwable) { try { processError.accept(new ServiceBusErrorContext(throwable, fullyQualifiedNamespace, entityPath)); } catch (Exception e) { logger.atVerbose().log("Ignoring error from user processError handler.", e); } } private void complete(ServiceBusReceivedMessage message) { try { receiversTracker.complete(message).block(); } catch (Exception e) { logger.atVerbose().log("Failed to complete message", e); } } private void abandon(ServiceBusReceivedMessage message) { try { receiversTracker.abandon(message).block(); } catch (Exception e) { logger.atVerbose().log("Failed to abandon message", e); } } /** * A type which provide a Flux {@link NextSessionStream * Each {@link RollingSessionReceiver} has a {@link NextSessionStream} instance associated, * when the {@link RollingSessionReceiver} wants to roll to a new session, it requests next session from the Flux. 
* * <p>Underneath, all the {@link NextSessionStream} instances (across all {@link RollingSessionReceiver}) shares * the common Mono {@link NextSession */ private static final class NextSessionStream extends AtomicBoolean { private final long pumpId; private final int rollerId; private final String fullyQualifiedNamespace; private final String entityPath; private final Mono<ServiceBusSessionAcquirer.Session> newSession; NextSessionStream(long pumpId, int rollerId, String fullyQualifiedNamespace, String entityPath, Mono<ServiceBusSessionAcquirer.Session> nextSession) { super(false); this.pumpId = pumpId; this.rollerId = rollerId; this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.entityPath = entityPath; this.newSession = Mono.defer(() -> { final boolean isTerminated = super.get(); if (isTerminated) { return Mono.error(new MessagePumpTerminatedException(this.pumpId, this.fullyQualifiedNamespace, this.entityPath, "session } else { return nextSession; } }).map(session -> { final boolean isTerminated = super.get(); if (isTerminated) { session.getLink().closeAsync().subscribe(); throw new MessagePumpTerminatedException(this.pumpId, this.fullyQualifiedNamespace, this.entityPath, "session } return session; }); } Flux<ServiceBusSessionAcquirer.Session> flux() { return nonEagerRepeat(newSession); } void close() { super.set(true); } private static Flux<ServiceBusSessionAcquirer.Session> nonEagerRepeat(Mono<ServiceBusSessionAcquirer.Session> source) { return source .cacheInvalidateIf(cachedSession -> cachedSession.getLink().isDisposed()) .repeat() .filter(session -> !session.getLink().isDisposed()); } } /** * A Function that when called, invokes {@link Message} handler using a Worker thread from a {@link Scheduler}. 
*/ private static final class RunOnWorker implements Function<Message, Publisher<Void>> { private final Consumer<Message> handleMessage; private final Scheduler workerScheduler; /** * Instantiate {@link RunOnWorker} to run the given {@code handleMessage} handler using a Worker * from the provided {@code workerScheduler}. * * @param handleMessage The message handler. * @param workerScheduler The Scheduler hosting the Worker to run the message handler. */ RunOnWorker(Consumer<Message> handleMessage, Scheduler workerScheduler) { this.handleMessage = handleMessage; this.workerScheduler = workerScheduler; } @Override public Mono<Void> apply(Message qpidMessage) { return Mono.<Void>fromRunnable(() -> { handleMessage.accept(qpidMessage); }).subscribeOn(workerScheduler); } } } /** * Tracks the running {@link ServiceBusSessionReactorReceiver} instances, each backing a RollingSessionReceiver instance. * Each time a RollingSessionReceiver rolls to a new ServiceBusSessionReactorReceiver Rn, it will track Rn by invoking * tack(Rn) and un-tracks the last (closed) ServiceBusSessionReactorReceiver Rm by invoking untrack(Rm). * * <p>The type holds sessionId to ServiceBusSessionReactorReceiver mapping. A session message can be disposition only on * the ServiceBusSessionReactorReceiver delivered it. The mapping tracked by this type enables looking up * the ServiceBusSessionReactorReceiver when a message needs to be disposition.</p> * * <p>It is possible that, a session say session-1 gets acquired by RollingSessionReceiver Ru, * while a RollingSessionReceiver Rv that was previously connected to session-1 rolls to session-2. 
Measures are * taken to ensure the Rv is not removing (un-track) session-1 tracked by Ru from the shared view in such concurrent * case, the underlying {@link ConcurrentHashMap */ static final class SessionReceiversTracker { private final ClientLogger logger; private final String fullyQualifiedNamespace; private final String entityPath; private final ServiceBusReceiveMode receiveMode; private final ConcurrentHashMap<String, ServiceBusSessionReactorReceiver> receivers; private final ServiceBusReceiverInstrumentation instrumentation; private SessionReceiversTracker(ClientLogger logger, int size, String fullyQualifiedNamespace, String entityPath, ServiceBusReceiveMode receiveMode, ServiceBusReceiverInstrumentation instrumentation) { this.logger = logger; this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.entityPath = entityPath; this.receiveMode = receiveMode; this.receivers = new ConcurrentHashMap<>(size); this.instrumentation = instrumentation; } private void track(ServiceBusSessionReactorReceiver receiver) { receivers.put(receiver.getSessionId(), receiver); } private void untrack(ServiceBusSessionReactorReceiver receiver) { receivers.remove(receiver.getSessionId(), receiver); } private void clear() { receivers.clear(); } String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } String getEntityPath() { return entityPath; } Mono<Void> abandon(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.ABANDONED, null, null, null, null); } Mono<Void> complete(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, null); } Mono<Void> deadLetter(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.SUSPENDED, null, null, null, null); } Mono<Void> defer(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.DEFERRED, null, null, null, null); } Mono<Void> abandon(ServiceBusReceivedMessage 
message, AbandonOptions options) { Mono<Void> nullError = checkNull(options, options != null ? options.getTransactionContext() : null); if (nullError != null) { return nullError; } return updateDisposition(message, DispositionStatus.ABANDONED, options.getPropertiesToModify(), null, null, options.getTransactionContext()); } Mono<Void> complete(ServiceBusReceivedMessage message, CompleteOptions options) { Mono<Void> nullError = checkNull(options, options != null ? options.getTransactionContext() : null); if (nullError != null) { return nullError; } return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, options.getTransactionContext()); } Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions options) { Mono<Void> nullError = checkNull(options, options != null ? options.getTransactionContext() : null); if (nullError != null) { return nullError; } return updateDisposition(message, DispositionStatus.SUSPENDED, options.getPropertiesToModify(), options.getDeadLetterReason(), options.getDeadLetterErrorDescription(), options.getTransactionContext()); } Mono<Void> defer(ServiceBusReceivedMessage message, DeferOptions options) { Mono<Void> nullError = checkNull(options, options != null ? 
options.getTransactionContext() : null); if (nullError != null) { return nullError; } return updateDisposition(message, DispositionStatus.DEFERRED, options.getPropertiesToModify(), null, null, options.getTransactionContext()); } private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { if (receiveMode != ServiceBusReceiveMode.PEEK_LOCK) { final String m = String.format("'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus); return Mono.error(new UnsupportedOperationException(m)); } else if (message.isSettled()) { final String m = "The message has either been deleted or already settled."; return Mono.error(new IllegalArgumentException(m)); } else if (message.getLockToken() == null) { final String m = "This operation is not supported for peeked messages. 
" + "Only messages received using receiveMessages() in PEEK_LOCK mode can be settled."; return Mono.error(new UnsupportedOperationException(m)); } final String sessionId = message.getSessionId(); final ServiceBusSessionReactorReceiver receiver = receivers.get(sessionId); final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); Mono<Void> updateDispositionMono; if (receiver != null) { updateDispositionMono = receiver.updateDisposition(message.getLockToken(), deliveryState); } else { updateDispositionMono = Mono.error(DeliveryNotOnLinkException.noMatchingDelivery(message.getLockToken(), deliveryState)); } return instrumentation.instrumentSettlement(updateDispositionMono, message, message.getContext(), dispositionStatus); } private Mono<Void> checkNull(Object options, ServiceBusTransactionContext transactionContext) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (transactionContext != null && transactionContext.getTransactionId() == null) { return monoError(logger, new NullPointerException("'options.transactionContext.transactionId' cannot be null.")); } return null; } } private static final class State<T extends AsyncCloseable> { final T receiver; State(T receiver) { this.receiver = Objects.requireNonNull(receiver); } static <T extends AsyncCloseable> State<T> init() { return new State<>(); } static <T extends AsyncCloseable> State<T> terminated() { return new State<>(); } private State() { this.receiver = null; } } /** * The signal that triggered {@link SessionsMessagePump} and {@link RollingSessionReceiver} termination. */ private enum TerminalSignalType { COMPLETED, ERRORED, CANCELED, } }
class SessionsMessagePump { private static final AtomicLong COUNTER = new AtomicLong(); private static final ArrayList<RollingSessionReceiver> EMPTY = new ArrayList<>(0); private static final ArrayList<RollingSessionReceiver> TERMINATED = new ArrayList<>(0); private static final Duration CONNECTION_STATE_POLL_INTERVAL = Duration.ofSeconds(20); private final long pumpId; private final ClientLogger logger; private final String identifier; private final String fullyQualifiedNamespace; private final String entityPath; private final ServiceBusReceiverInstrumentation instrumentation; private final ServiceBusSessionAcquirer sessionAcquirer; private final Duration maxSessionLockRenew; private final Duration sessionIdleTimeout; private final int maxConcurrentSessions; private final int concurrencyPerSession; private final int prefetch; private final boolean enableAutoDisposition; private final MessageSerializer serializer; private final AmqpRetryPolicy retryPolicy; private final Consumer<ServiceBusReceivedMessageContext> processMessage; private final Consumer<ServiceBusErrorContext> processError; private final Runnable onTerminate; private final AtomicReference<List<RollingSessionReceiver>> rollingReceiversRef = new AtomicReference<>(EMPTY); private final SessionReceiversTracker receiversTracker; private final Mono<ServiceBusSessionAcquirer.Session> nextSession; SessionsMessagePump(String identifier, String fullyQualifiedNamespace, String entityPath, ServiceBusReceiveMode receiveMode, ServiceBusReceiverInstrumentation instrumentation, ServiceBusSessionAcquirer sessionAcquirer, Duration maxSessionLockRenew, Duration sessionIdleTimeout, int maxConcurrentSessions, int concurrencyPerSession, int prefetch, boolean enableAutoDisposition, MessageSerializer serializer, AmqpRetryPolicy retryPolicy, Consumer<ServiceBusReceivedMessageContext> processMessage, Consumer<ServiceBusErrorContext> processError, Runnable onTerminate) { this.pumpId = COUNTER.incrementAndGet(); final 
Map<String, Object> loggingContext = new HashMap<>(3); loggingContext.put(PUMP_ID_KEY, pumpId); loggingContext.put(FULLY_QUALIFIED_NAMESPACE_KEY, fullyQualifiedNamespace); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(SessionsMessagePump.class, loggingContext); this.identifier = identifier; this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); Objects.requireNonNull(receiveMode, "'receiveMode' cannot be null."); this.instrumentation = Objects.requireNonNull(instrumentation, "'instrumentation' cannot be null"); this.sessionAcquirer = Objects.requireNonNull(sessionAcquirer, "'sessionAcquirer' cannot be null"); this.maxSessionLockRenew = Objects.requireNonNull(maxSessionLockRenew, "'maxSessionLockRenew' cannot be null."); this.sessionIdleTimeout = sessionIdleTimeout != null ? sessionIdleTimeout : retryPolicy.getRetryOptions().getTryTimeout(); this.maxConcurrentSessions = maxConcurrentSessions; this.concurrencyPerSession = concurrencyPerSession; this.prefetch = prefetch; this.enableAutoDisposition = enableAutoDisposition; this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null."); this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null."); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null."); this.onTerminate = Objects.requireNonNull(onTerminate, "'onTerminate' cannot be null."); this.receiversTracker = new SessionReceiversTracker(logger, maxConcurrentSessions, fullyQualifiedNamespace, entityPath, receiveMode, instrumentation); this.nextSession = new NextSession(pumpId, fullyQualifiedNamespace, entityPath, sessionAcquirer).mono(); } String getIdentifier() { return identifier; } /** * Obtain a 
Mono that when subscribed, start pumping messages from {@code maxConcurrentSessions} sessions. * The Mono emits terminal signal once there is a failure in obtaining a new session. * * <p>The Mono emits {@link UnsupportedOperationException} if it is subscribed more than once. If the Mono is subscribed * after the termination of SessionsMessagePump it emits {@link MessagePumpTerminatedException}.</p> * * @return the Mono to begin and cancel message pumping. */ Mono<Void> begin() { logCPUResourcesConcurrencyMismatch(); final Mono<List<RollingSessionReceiver>> createReceiversMono = Mono.fromSupplier(() -> { throwIfTerminatedOrInitialized(); final List<RollingSessionReceiver> rollingReceivers = createRollingSessionReceivers(); if (!rollingReceiversRef.compareAndSet(EMPTY, rollingReceivers)) { rollingReceivers.clear(); throwIfTerminatedOrInitialized(); } return rollingReceivers; }); final Function<List<RollingSessionReceiver>, Mono<Void>> pumpFromReceiversMono = rollingReceivers -> { final List<Mono<Void>> pumpingList = new ArrayList<>(rollingReceivers.size()); for (RollingSessionReceiver rollingReceiver : rollingReceivers) { pumpingList.add(rollingReceiver.begin()); } final Mono<Void> terminatePumping = pollConnectionState(); final Mono<Void> pumping = Mono.when(pumpingList); return Mono.firstWithSignal(terminatePumping, pumping); }; final Mono<Void> pumpingMessages = Mono.usingWhen(createReceiversMono, pumpFromReceiversMono, (__) -> terminate(TerminalSignalType.COMPLETED), (__, e) -> terminate(TerminalSignalType.ERRORED), (__) -> terminate(TerminalSignalType.CANCELED)); return pumpingMessages .onErrorMap(e -> { if (e instanceof MessagePumpTerminatedException) { return e; } return new MessagePumpTerminatedException(pumpId, fullyQualifiedNamespace, entityPath, "pumping }) .then(Mono.error(() -> MessagePumpTerminatedException.forCompletion(pumpId, fullyQualifiedNamespace, entityPath))); } private Mono<Void> pollConnectionState() { return 
Flux.interval(CONNECTION_STATE_POLL_INTERVAL) .handle((ignored, sink) -> { if (sessionAcquirer.isConnectionClosed()) { final RuntimeException e = logger.atInfo() .log(new MessagePumpTerminatedException(pumpId, fullyQualifiedNamespace, entityPath, "session sink.error(e); } }).then(); } private Mono<Void> terminate(TerminalSignalType signalType) { final List<RollingSessionReceiver> rollingReceivers = rollingReceiversRef.getAndSet(TERMINATED); if (rollingReceivers == TERMINATED) { return Mono.empty(); } logger.atInfo().log("Pump terminated. signal:" + signalType); receiversTracker.clear(); onTerminate.run(); return Mono.empty(); } private List<RollingSessionReceiver> createRollingSessionReceivers() { final ArrayList<RollingSessionReceiver> rollingReceivers = new ArrayList<>(maxConcurrentSessions); for (int rollerId = 1; rollerId <= maxConcurrentSessions; rollerId++) { final RollingSessionReceiver rollingReceiver = new RollingSessionReceiver(pumpId, rollerId, instrumentation, fullyQualifiedNamespace, entityPath, nextSession, maxSessionLockRenew, sessionIdleTimeout, concurrencyPerSession, prefetch, enableAutoDisposition, serializer, retryPolicy, processMessage, processError, receiversTracker); rollingReceivers.add(rollingReceiver); } return rollingReceivers; } private void throwIfTerminatedOrInitialized() { final List<RollingSessionReceiver> l = rollingReceiversRef.get(); if (l == TERMINATED) { throw logger.atVerbose().log(new IllegalStateException("Cannot invoke begin() once terminated.")); } if (l != EMPTY) { throw logger.atVerbose().log(new IllegalStateException("Cannot invoke begin() more than once.")); } } /** * The type which provides a Mono {@link NextSession * All the {@link RollingSessionReceiver} in the {@link SessionsMessagePump} shares this Mono to obtain unique sessions. 
* * <p>The event the Mono fails to acquire a session, the type marks itself as terminated (i.e., self-terminate) and * notifies the subscription about the failure as {@link MessagePumpTerminatedException}. Any later subscriptions will * be notified with MessagePumpTerminatedException.</p> * * <p>If a RollingSessionReceiver encounters acquire session failure, it stops pumping and emit terminal signal. * At this point, the SessionMessagePump will cancel all other RollingSessionReceiver instances. The design of * the SessionMessagePump is to stop pumping from all sessions once any of the RollingSessionReceiver emits terminal * signal. The self-terminating nature of the shared {@link NextSession * more of the RollingSessionReceiver attempting session acquire when SessionMessagePump is about to or in progress * of canceling those.</p> */ private static final class NextSession implements Supplier<Mono<ServiceBusSessionAcquirer.Session>> { private final AtomicReference<Boolean> isTerminated = new AtomicReference<>(false); private final long pumpId; private final String fullyQualifiedNamespace; private final String entityPath; private final ServiceBusSessionAcquirer sessionAcquirer; NextSession(long pumpId, String fullyQualifiedNamespace, String entityPath, ServiceBusSessionAcquirer sessionAcquirer) { this.pumpId = pumpId; this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.entityPath = entityPath; this.sessionAcquirer = sessionAcquirer; } Mono<ServiceBusSessionAcquirer.Session> mono() { final Supplier<Mono<ServiceBusSessionAcquirer.Session>> supplier = this; return Mono.defer(supplier); } @Override public Mono<ServiceBusSessionAcquirer.Session> get() { if (isTerminated.get()) { return Mono.error(new MessagePumpTerminatedException(pumpId, fullyQualifiedNamespace, entityPath, "session } return sessionAcquirer.acquire() .onErrorMap(e -> { isTerminated.set(true); return new MessagePumpTerminatedException(pumpId, fullyQualifiedNamespace, entityPath, "session }); } } 
/** * A type that is responsible for managing a session, concurrently (with parallelism equal to {@code concurrencyPerSession}) * pumping messages from the session and rolling to the next session when current session terminates. */ private static final class RollingSessionReceiver extends AtomicReference<State<ServiceBusSessionReactorReceiver>> { private static final String ROLLER_ID_KEY = "roller-id"; private static final State<ServiceBusSessionReactorReceiver> INIT = State.init(); private static final State<ServiceBusSessionReactorReceiver> TERMINATED = State.terminated(); private final ClientLogger logger; private final long pumpId; private final int rollerId; private final String fullyQualifiedNamespace; private final String entityPath; private final int concurrency; private final Consumer<ServiceBusReceivedMessageContext> processMessage; private final Consumer<ServiceBusErrorContext> processError; private final boolean enableAutoDisposition; private final Duration maxSessionLockRenew; private final Duration sessionIdleTimeout; private final MessageSerializer serializer; private final ServiceBusReceiverInstrumentation instrumentation; private final ServiceBusTracer tracer; private final SessionReceiversTracker receiversTracker; private final NextSessionStream nextSessionStream; private final MessageFlux messageFlux; RollingSessionReceiver(long pumpId, int rollerId, ServiceBusReceiverInstrumentation instrumentation, String fullyQualifiedNamespace, String entityPath, Mono<ServiceBusSessionAcquirer.Session> nextSession, Duration maxSessionLockRenew, Duration sessionIdleTimeout, int concurrency, int prefetch, boolean enableAutoDisposition, MessageSerializer serializer, AmqpRetryPolicy retryPolicy, Consumer<ServiceBusReceivedMessageContext> processMessage, Consumer<ServiceBusErrorContext> processError, SessionReceiversTracker receiversTracker) { super(INIT); this.pumpId = pumpId; final Map<String, Object> loggingContext = new HashMap<>(3); 
loggingContext.put(ROLLER_ID_KEY, rollerId); loggingContext.put(FULLY_QUALIFIED_NAMESPACE_KEY, fullyQualifiedNamespace); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(RollingSessionReceiver.class, loggingContext); this.rollerId = rollerId; this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.entityPath = entityPath; this.concurrency = concurrency; this.processError = processError; this.processMessage = processMessage; this.enableAutoDisposition = enableAutoDisposition; this.maxSessionLockRenew = maxSessionLockRenew; this.sessionIdleTimeout = sessionIdleTimeout; this.serializer = serializer; this.instrumentation = instrumentation; this.tracer = instrumentation.getTracer(); this.receiversTracker = receiversTracker; this.nextSessionStream = new NextSessionStream(pumpId, rollerId, fullyQualifiedNamespace, entityPath, nextSession); final Flux<ServiceBusSessionReactorReceiver> nextSessionReceiverStream = nextSessionStream.flux() .map(this::nextSessionReceiver); this.messageFlux = new MessageFlux(nextSessionReceiverStream, prefetch, CreditFlowMode.RequestDriven, retryPolicy); } Mono<Void> begin() { return Mono.usingWhen( Mono.fromSupplier(() -> { final Scheduler workerScheduler; if (concurrency > 1) { workerScheduler = Schedulers.newBoundedElastic( DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "rolling-session-receiver-" + rollerId); } else { workerScheduler = Schedulers.immediate(); } return workerScheduler; }), workerScheduler -> { final RunOnWorker handleMessageOnWorker = new RunOnWorker(this::handleMessage, workerScheduler); return messageFlux.flatMap(handleMessageOnWorker, concurrency, 1).then(); }, (workerScheduler) -> terminate(TerminalSignalType.COMPLETED, workerScheduler), (workerScheduler, e) -> terminate(TerminalSignalType.ERRORED, workerScheduler), (workerScheduler) -> terminate(TerminalSignalType.CANCELED, workerScheduler) ); } private Mono<Void> terminate(TerminalSignalType signalType, Scheduler 
workerScheduler) { final State<ServiceBusSessionReactorReceiver> state = super.getAndSet(TERMINATED); if (state == TERMINATED) { return Mono.empty(); } logger.atInfo().log("Roller terminated. rollerId:" + rollerId + " signal:" + signalType); nextSessionStream.close(); workerScheduler.dispose(); return Mono.empty(); } private ServiceBusSessionReactorReceiver nextSessionReceiver(ServiceBusSessionAcquirer.Session nextSession) { final State<ServiceBusSessionReactorReceiver> lastState = super.get(); if (lastState == TERMINATED) { nextSession.getLink().closeAsync().subscribe(); throw new MessagePumpTerminatedException(pumpId, fullyQualifiedNamespace, entityPath, "session } final ServiceBusSessionReactorReceiver nextSessionReceiver = new ServiceBusSessionReactorReceiver(logger, tracer, nextSession, sessionIdleTimeout, maxSessionLockRenew); if (!super.compareAndSet(lastState, new State<>(nextSessionReceiver))) { nextSessionReceiver.closeAsync().subscribe(); throw new MessagePumpTerminatedException(pumpId, fullyQualifiedNamespace, entityPath, "session } if (lastState != INIT) { final ServiceBusSessionReactorReceiver lastSessionReceiver = lastState.receiver; receiversTracker.untrack(lastSessionReceiver); } receiversTracker.track(nextSessionReceiver); return nextSessionReceiver; } private void handleMessage(Message qpidMessage) { final ServiceBusReceivedMessage message = serializer.deserialize(qpidMessage, ServiceBusReceivedMessage.class); instrumentation.instrumentProcess(message, ReceiverKind.PROCESSOR, msg -> { logger.atVerbose() .addKeyValue(SESSION_ID_KEY, message.getSessionId()) .addKeyValue(MESSAGE_ID_LOGGING_KEY, message.getMessageId()) .log("Received message."); final Throwable error = notifyMessage(msg); if (enableAutoDisposition) { if (error == null) { complete(msg); } else { abandon(msg); } } return error; }); } private Throwable notifyMessage(ServiceBusReceivedMessage message) { try { processMessage.accept( new ServiceBusReceivedMessageContext(receiversTracker, 
new ServiceBusMessageContext(message))); } catch (Exception e) { notifyError(new ServiceBusException(e, ServiceBusErrorSource.USER_CALLBACK)); return e; } return null; } private void notifyError(Throwable throwable) { try { processError.accept(new ServiceBusErrorContext(throwable, fullyQualifiedNamespace, entityPath)); } catch (Exception e) { logger.atVerbose().log("Ignoring error from user processError handler.", e); } } private void complete(ServiceBusReceivedMessage message) { try { receiversTracker.complete(message).block(); } catch (Exception e) { logger.atVerbose().log("Failed to complete message", e); } } private void abandon(ServiceBusReceivedMessage message) { try { receiversTracker.abandon(message).block(); } catch (Exception e) { logger.atVerbose().log("Failed to abandon message", e); } } /** * A type which provide a Flux {@link NextSessionStream * Each {@link RollingSessionReceiver} has a {@link NextSessionStream} instance associated, * when the {@link RollingSessionReceiver} wants to roll to a new session, it requests next session from the Flux. 
* * <p>Underneath, all the {@link NextSessionStream} instances (across all {@link RollingSessionReceiver}) shares * the common Mono {@link NextSession */ private static final class NextSessionStream extends AtomicBoolean { private final long pumpId; private final int rollerId; private final String fullyQualifiedNamespace; private final String entityPath; private final Mono<ServiceBusSessionAcquirer.Session> newSession; NextSessionStream(long pumpId, int rollerId, String fullyQualifiedNamespace, String entityPath, Mono<ServiceBusSessionAcquirer.Session> nextSession) { super(false); this.pumpId = pumpId; this.rollerId = rollerId; this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.entityPath = entityPath; this.newSession = Mono.defer(() -> { final boolean isTerminated = super.get(); if (isTerminated) { return Mono.error(new MessagePumpTerminatedException(this.pumpId, this.fullyQualifiedNamespace, this.entityPath, "session } else { return nextSession; } }).map(session -> { final boolean isTerminated = super.get(); if (isTerminated) { session.getLink().closeAsync().subscribe(); throw new MessagePumpTerminatedException(this.pumpId, this.fullyQualifiedNamespace, this.entityPath, "session } return session; }); } Flux<ServiceBusSessionAcquirer.Session> flux() { return nonEagerRepeat(newSession); } void close() { super.set(true); } private static Flux<ServiceBusSessionAcquirer.Session> nonEagerRepeat(Mono<ServiceBusSessionAcquirer.Session> source) { return source .cacheInvalidateIf(cachedSession -> cachedSession.getLink().isDisposed()) .repeat() .filter(session -> !session.getLink().isDisposed()); } } /** * A Function that when called, invokes {@link Message} handler using a Worker thread from a {@link Scheduler}. 
*/ private static final class RunOnWorker implements Function<Message, Publisher<Void>> { private final Consumer<Message> handleMessage; private final Scheduler workerScheduler; /** * Instantiate {@link RunOnWorker} to run the given {@code handleMessage} handler using a Worker * from the provided {@code workerScheduler}. * * @param handleMessage The message handler. * @param workerScheduler The Scheduler hosting the Worker to run the message handler. */ RunOnWorker(Consumer<Message> handleMessage, Scheduler workerScheduler) { this.handleMessage = handleMessage; this.workerScheduler = workerScheduler; } @Override public Mono<Void> apply(Message qpidMessage) { return Mono.<Void>fromRunnable(() -> { handleMessage.accept(qpidMessage); }).subscribeOn(workerScheduler); } } } /** * Tracks the running {@link ServiceBusSessionReactorReceiver} instances, each backing a RollingSessionReceiver instance. * Each time a RollingSessionReceiver rolls to a new ServiceBusSessionReactorReceiver Rn, it will track Rn by invoking * tack(Rn) and un-tracks the last (closed) ServiceBusSessionReactorReceiver Rm by invoking untrack(Rm). * * <p>The type holds sessionId to ServiceBusSessionReactorReceiver mapping. A session message can be disposition only on * the ServiceBusSessionReactorReceiver delivered it. The mapping tracked by this type enables looking up * the ServiceBusSessionReactorReceiver when a message needs to be disposition.</p> * * <p>It is possible that, a session say session-1 gets acquired by RollingSessionReceiver Ru, * while a RollingSessionReceiver Rv that was previously connected to session-1 rolls to session-2. 
Measures are * taken to ensure the Rv is not removing (un-track) session-1 tracked by Ru from the shared view in such concurrent * case, the underlying {@link ConcurrentHashMap */ static final class SessionReceiversTracker { private final ClientLogger logger; private final String fullyQualifiedNamespace; private final String entityPath; private final ServiceBusReceiveMode receiveMode; private final ConcurrentHashMap<String, ServiceBusSessionReactorReceiver> receivers; private final ServiceBusReceiverInstrumentation instrumentation; private SessionReceiversTracker(ClientLogger logger, int size, String fullyQualifiedNamespace, String entityPath, ServiceBusReceiveMode receiveMode, ServiceBusReceiverInstrumentation instrumentation) { this.logger = logger; this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.entityPath = entityPath; this.receiveMode = receiveMode; this.receivers = new ConcurrentHashMap<>(size); this.instrumentation = instrumentation; } private void track(ServiceBusSessionReactorReceiver receiver) { receivers.put(receiver.getSessionId(), receiver); } private void untrack(ServiceBusSessionReactorReceiver receiver) { receivers.remove(receiver.getSessionId(), receiver); } private void clear() { receivers.clear(); } String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } String getEntityPath() { return entityPath; } Mono<Void> abandon(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.ABANDONED, null, null, null, null); } Mono<Void> complete(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, null); } Mono<Void> deadLetter(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.SUSPENDED, null, null, null, null); } Mono<Void> defer(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.DEFERRED, null, null, null, null); } Mono<Void> abandon(ServiceBusReceivedMessage 
message, AbandonOptions options) { Mono<Void> nullError = checkNull(options, options != null ? options.getTransactionContext() : null); if (nullError != null) { return nullError; } return updateDisposition(message, DispositionStatus.ABANDONED, options.getPropertiesToModify(), null, null, options.getTransactionContext()); } Mono<Void> complete(ServiceBusReceivedMessage message, CompleteOptions options) { Mono<Void> nullError = checkNull(options, options != null ? options.getTransactionContext() : null); if (nullError != null) { return nullError; } return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, options.getTransactionContext()); } Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions options) { Mono<Void> nullError = checkNull(options, options != null ? options.getTransactionContext() : null); if (nullError != null) { return nullError; } return updateDisposition(message, DispositionStatus.SUSPENDED, options.getPropertiesToModify(), options.getDeadLetterReason(), options.getDeadLetterErrorDescription(), options.getTransactionContext()); } Mono<Void> defer(ServiceBusReceivedMessage message, DeferOptions options) { Mono<Void> nullError = checkNull(options, options != null ? 
options.getTransactionContext() : null); if (nullError != null) { return nullError; } return updateDisposition(message, DispositionStatus.DEFERRED, options.getPropertiesToModify(), null, null, options.getTransactionContext()); } private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { if (receiveMode != ServiceBusReceiveMode.PEEK_LOCK) { final String m = String.format("'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus); return Mono.error(new UnsupportedOperationException(m)); } else if (message.isSettled()) { final String m = "The message has either been deleted or already settled."; return Mono.error(new IllegalArgumentException(m)); } else if (message.getLockToken() == null) { final String m = "This operation is not supported for peeked messages. 
" + "Only messages received using receiveMessages() in PEEK_LOCK mode can be settled."; return Mono.error(new UnsupportedOperationException(m)); } final String sessionId = message.getSessionId(); final ServiceBusSessionReactorReceiver receiver = receivers.get(sessionId); final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); Mono<Void> updateDispositionMono; if (receiver != null) { updateDispositionMono = receiver.updateDisposition(message.getLockToken(), deliveryState); } else { updateDispositionMono = Mono.error(DeliveryNotOnLinkException.noMatchingDelivery(message.getLockToken(), deliveryState)); } return instrumentation.instrumentSettlement(updateDispositionMono, message, message.getContext(), dispositionStatus); } private Mono<Void> checkNull(Object options, ServiceBusTransactionContext transactionContext) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (transactionContext != null && transactionContext.getTransactionId() == null) { return monoError(logger, new NullPointerException("'options.transactionContext.transactionId' cannot be null.")); } return null; } } private static final class State<T extends AsyncCloseable> { final T receiver; State(T receiver) { this.receiver = Objects.requireNonNull(receiver); } static <T extends AsyncCloseable> State<T> init() { return new State<>(); } static <T extends AsyncCloseable> State<T> terminated() { return new State<>(); } private State() { this.receiver = null; } } /** * The signal that triggered {@link SessionsMessagePump} and {@link RollingSessionReceiver} termination. */ private enum TerminalSignalType { COMPLETED, ERRORED, CANCELED, } }
Right, this makes sense to be at WARN level; I'll update.
/**
 * Logs a warning when the configured processor concurrency is out of proportion with the
 * host's CPU resources or the bounded-elastic worker pool that runs the message handlers.
 *
 * <p>The mismatch is reported when the per-session concurrency exceeds the bounded-elastic
 * pool size, or when the total concurrency ({@code maxConcurrentSessions * concurrencyPerSession})
 * exceeds {@code CONCURRENCY_PER_CORE} times the number of available processors.</p>
 */
private void logCPUResourcesConcurrencyMismatch() {
    final int cores = Runtime.getRuntime().availableProcessors();
    final int poolSize = DEFAULT_BOUNDED_ELASTIC_SIZE;
    final int concurrency = maxConcurrentSessions * concurrencyPerSession;
    if (concurrencyPerSession > poolSize || concurrency > CONCURRENCY_PER_CORE * cores) {
        final String message = concurrency + " (ConcurrentSessions=" + maxConcurrentSessions
            + ", ConcurrencyPerSession=" + concurrencyPerSession + ")";
        // WARN (not INFO): an over-provisioned concurrency configuration can silently degrade
        // throughput, so it should surface in default log configurations.
        logger.atWarning().log(CORES_VS_CONCURRENCY_MESSAGE, poolSize, cores, message);
    }
}
logger.atWarning().log(CORES_VS_CONCURRENCY_MESSAGE, poolSize, cores, message);
/**
 * Emits a warning if the configured concurrency does not fit the host's CPU resources
 * or the bounded-elastic worker pool backing the message handlers.
 *
 * <p>Two conditions trigger the warning: the per-session concurrency is larger than the
 * bounded-elastic pool, or the combined concurrency across all sessions is larger than
 * {@code CONCURRENCY_PER_CORE} times the available processors.</p>
 */
private void logCPUResourcesConcurrencyMismatch() {
    final int availableCores = Runtime.getRuntime().availableProcessors();
    final int boundedElasticPoolSize = DEFAULT_BOUNDED_ELASTIC_SIZE;
    final int totalConcurrency = maxConcurrentSessions * concurrencyPerSession;
    final boolean exceedsPool = concurrencyPerSession > boundedElasticPoolSize;
    final boolean exceedsCores = totalConcurrency > CONCURRENCY_PER_CORE * availableCores;
    if (!exceedsPool && !exceedsCores) {
        return;
    }
    final String message = totalConcurrency + " (ConcurrentSessions=" + maxConcurrentSessions
        + ", ConcurrencyPerSession=" + concurrencyPerSession + ")";
    logger.atWarning().log(CORES_VS_CONCURRENCY_MESSAGE, boundedElasticPoolSize, availableCores, message);
}
class SessionsMessagePump { private static final AtomicLong COUNTER = new AtomicLong(); private static final ArrayList<RollingSessionReceiver> EMPTY = new ArrayList<>(0); private static final ArrayList<RollingSessionReceiver> TERMINATED = new ArrayList<>(0); private static final Duration CONNECTION_STATE_POLL_INTERVAL = Duration.ofSeconds(20); private final long pumpId; private final ClientLogger logger; private final String identifier; private final String fullyQualifiedNamespace; private final String entityPath; private final ServiceBusReceiverInstrumentation instrumentation; private final ServiceBusSessionAcquirer sessionAcquirer; private final Duration maxSessionLockRenew; private final Duration sessionIdleTimeout; private final int maxConcurrentSessions; private final int concurrencyPerSession; private final int prefetch; private final boolean enableAutoDisposition; private final MessageSerializer serializer; private final AmqpRetryPolicy retryPolicy; private final Consumer<ServiceBusReceivedMessageContext> processMessage; private final Consumer<ServiceBusErrorContext> processError; private final Runnable onTerminate; private final AtomicReference<List<RollingSessionReceiver>> rollingReceiversRef = new AtomicReference<>(EMPTY); private final SessionReceiversTracker receiversTracker; private final Mono<ServiceBusSessionAcquirer.Session> nextSession; SessionsMessagePump(String identifier, String fullyQualifiedNamespace, String entityPath, ServiceBusReceiveMode receiveMode, ServiceBusReceiverInstrumentation instrumentation, ServiceBusSessionAcquirer sessionAcquirer, Duration maxSessionLockRenew, Duration sessionIdleTimeout, int maxConcurrentSessions, int concurrencyPerSession, int prefetch, boolean enableAutoDisposition, MessageSerializer serializer, AmqpRetryPolicy retryPolicy, Consumer<ServiceBusReceivedMessageContext> processMessage, Consumer<ServiceBusErrorContext> processError, Runnable onTerminate) { this.pumpId = COUNTER.incrementAndGet(); final 
Map<String, Object> loggingContext = new HashMap<>(3); loggingContext.put(PUMP_ID_KEY, pumpId); loggingContext.put(FULLY_QUALIFIED_NAMESPACE_KEY, fullyQualifiedNamespace); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(SessionsMessagePump.class, loggingContext); this.identifier = identifier; this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); Objects.requireNonNull(receiveMode, "'receiveMode' cannot be null."); this.instrumentation = Objects.requireNonNull(instrumentation, "'instrumentation' cannot be null"); this.sessionAcquirer = Objects.requireNonNull(sessionAcquirer, "'sessionAcquirer' cannot be null"); this.maxSessionLockRenew = Objects.requireNonNull(maxSessionLockRenew, "'maxSessionLockRenew' cannot be null."); this.sessionIdleTimeout = sessionIdleTimeout != null ? sessionIdleTimeout : retryPolicy.getRetryOptions().getTryTimeout(); this.maxConcurrentSessions = maxConcurrentSessions; this.concurrencyPerSession = concurrencyPerSession; this.prefetch = prefetch; this.enableAutoDisposition = enableAutoDisposition; this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null."); this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null."); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null."); this.onTerminate = Objects.requireNonNull(onTerminate, "'onTerminate' cannot be null."); this.receiversTracker = new SessionReceiversTracker(logger, maxConcurrentSessions, fullyQualifiedNamespace, entityPath, receiveMode, instrumentation); this.nextSession = new NextSession(pumpId, fullyQualifiedNamespace, entityPath, sessionAcquirer).mono(); } String getIdentifier() { return identifier; } /** * Obtain a 
Mono that when subscribed, start pumping messages from {@code maxConcurrentSessions} sessions. * The Mono emits terminal signal once there is a failure in obtaining a new session. * * <p>The Mono emits {@link UnsupportedOperationException} if it is subscribed more than once. If the Mono is subscribed * after the termination of SessionsMessagePump it emits {@link MessagePumpTerminatedException}.</p> * * @return the Mono to begin and cancel message pumping. */ Mono<Void> begin() { logCPUResourcesConcurrencyMismatch(); final Mono<List<RollingSessionReceiver>> createReceiversMono = Mono.fromSupplier(() -> { throwIfTerminatedOrInitialized(); final List<RollingSessionReceiver> rollingReceivers = createRollingSessionReceivers(); if (!rollingReceiversRef.compareAndSet(EMPTY, rollingReceivers)) { rollingReceivers.clear(); throwIfTerminatedOrInitialized(); } return rollingReceivers; }); final Function<List<RollingSessionReceiver>, Mono<Void>> pumpFromReceiversMono = rollingReceivers -> { final List<Mono<Void>> pumpingList = new ArrayList<>(rollingReceivers.size()); for (RollingSessionReceiver rollingReceiver : rollingReceivers) { pumpingList.add(rollingReceiver.begin()); } final Mono<Void> terminatePumping = pollConnectionState(); final Mono<Void> pumping = Mono.when(pumpingList); return Mono.firstWithSignal(terminatePumping, pumping); }; final Mono<Void> pumpingMessages = Mono.usingWhen(createReceiversMono, pumpFromReceiversMono, (__) -> terminate(TerminalSignalType.COMPLETED), (__, e) -> terminate(TerminalSignalType.ERRORED), (__) -> terminate(TerminalSignalType.CANCELED)); return pumpingMessages .onErrorMap(e -> { if (e instanceof MessagePumpTerminatedException) { return e; } return new MessagePumpTerminatedException(pumpId, fullyQualifiedNamespace, entityPath, "pumping }) .then(Mono.error(() -> MessagePumpTerminatedException.forCompletion(pumpId, fullyQualifiedNamespace, entityPath))); } private Mono<Void> pollConnectionState() { return 
Flux.interval(CONNECTION_STATE_POLL_INTERVAL) .handle((ignored, sink) -> { if (sessionAcquirer.isConnectionClosed()) { final RuntimeException e = logger.atInfo() .log(new MessagePumpTerminatedException(pumpId, fullyQualifiedNamespace, entityPath, "session sink.error(e); } }).then(); } private Mono<Void> terminate(TerminalSignalType signalType) { final List<RollingSessionReceiver> rollingReceivers = rollingReceiversRef.getAndSet(TERMINATED); if (rollingReceivers == TERMINATED) { return Mono.empty(); } logger.atInfo().log("Pump terminated. signal:" + signalType); receiversTracker.clear(); onTerminate.run(); return Mono.empty(); } private List<RollingSessionReceiver> createRollingSessionReceivers() { final ArrayList<RollingSessionReceiver> rollingReceivers = new ArrayList<>(maxConcurrentSessions); for (int rollerId = 1; rollerId <= maxConcurrentSessions; rollerId++) { final RollingSessionReceiver rollingReceiver = new RollingSessionReceiver(pumpId, rollerId, instrumentation, fullyQualifiedNamespace, entityPath, nextSession, maxSessionLockRenew, sessionIdleTimeout, concurrencyPerSession, prefetch, enableAutoDisposition, serializer, retryPolicy, processMessage, processError, receiversTracker); rollingReceivers.add(rollingReceiver); } return rollingReceivers; } private void throwIfTerminatedOrInitialized() { final List<RollingSessionReceiver> l = rollingReceiversRef.get(); if (l == TERMINATED) { throw logger.atVerbose().log(new IllegalStateException("Cannot invoke begin() once terminated.")); } if (l != EMPTY) { throw logger.atVerbose().log(new IllegalStateException("Cannot invoke begin() more than once.")); } } /** * The type which provides a Mono {@link NextSession * All the {@link RollingSessionReceiver} in the {@link SessionsMessagePump} shares this Mono to obtain unique sessions. 
* * <p>The event the Mono fails to acquire a session, the type marks itself as terminated (i.e., self-terminate) and * notifies the subscription about the failure as {@link MessagePumpTerminatedException}. Any later subscriptions will * be notified with MessagePumpTerminatedException.</p> * * <p>If a RollingSessionReceiver encounters acquire session failure, it stops pumping and emit terminal signal. * At this point, the SessionMessagePump will cancel all other RollingSessionReceiver instances. The design of * the SessionMessagePump is to stop pumping from all sessions once any of the RollingSessionReceiver emits terminal * signal. The self-terminating nature of the shared {@link NextSession * more of the RollingSessionReceiver attempting session acquire when SessionMessagePump is about to or in progress * of canceling those.</p> */ private static final class NextSession implements Supplier<Mono<ServiceBusSessionAcquirer.Session>> { private final AtomicReference<Boolean> isTerminated = new AtomicReference<>(false); private final long pumpId; private final String fullyQualifiedNamespace; private final String entityPath; private final ServiceBusSessionAcquirer sessionAcquirer; NextSession(long pumpId, String fullyQualifiedNamespace, String entityPath, ServiceBusSessionAcquirer sessionAcquirer) { this.pumpId = pumpId; this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.entityPath = entityPath; this.sessionAcquirer = sessionAcquirer; } Mono<ServiceBusSessionAcquirer.Session> mono() { final Supplier<Mono<ServiceBusSessionAcquirer.Session>> supplier = this; return Mono.defer(supplier); } @Override public Mono<ServiceBusSessionAcquirer.Session> get() { if (isTerminated.get()) { return Mono.error(new MessagePumpTerminatedException(pumpId, fullyQualifiedNamespace, entityPath, "session } return sessionAcquirer.acquire() .onErrorMap(e -> { isTerminated.set(true); return new MessagePumpTerminatedException(pumpId, fullyQualifiedNamespace, entityPath, "session }); } } 
/** * A type that is responsible for managing a session, concurrently (with parallelism equal to {@code concurrencyPerSession}) * pumping messages from the session and rolling to the next session when current session terminates. */ private static final class RollingSessionReceiver extends AtomicReference<State<ServiceBusSessionReactorReceiver>> { private static final String ROLLER_ID_KEY = "roller-id"; private static final State<ServiceBusSessionReactorReceiver> INIT = State.init(); private static final State<ServiceBusSessionReactorReceiver> TERMINATED = State.terminated(); private final ClientLogger logger; private final long pumpId; private final int rollerId; private final String fullyQualifiedNamespace; private final String entityPath; private final int concurrency; private final Consumer<ServiceBusReceivedMessageContext> processMessage; private final Consumer<ServiceBusErrorContext> processError; private final boolean enableAutoDisposition; private final Duration maxSessionLockRenew; private final Duration sessionIdleTimeout; private final MessageSerializer serializer; private final ServiceBusReceiverInstrumentation instrumentation; private final ServiceBusTracer tracer; private final SessionReceiversTracker receiversTracker; private final NextSessionStream nextSessionStream; private final MessageFlux messageFlux; RollingSessionReceiver(long pumpId, int rollerId, ServiceBusReceiverInstrumentation instrumentation, String fullyQualifiedNamespace, String entityPath, Mono<ServiceBusSessionAcquirer.Session> nextSession, Duration maxSessionLockRenew, Duration sessionIdleTimeout, int concurrency, int prefetch, boolean enableAutoDisposition, MessageSerializer serializer, AmqpRetryPolicy retryPolicy, Consumer<ServiceBusReceivedMessageContext> processMessage, Consumer<ServiceBusErrorContext> processError, SessionReceiversTracker receiversTracker) { super(INIT); this.pumpId = pumpId; final Map<String, Object> loggingContext = new HashMap<>(3); 
loggingContext.put(ROLLER_ID_KEY, rollerId); loggingContext.put(FULLY_QUALIFIED_NAMESPACE_KEY, fullyQualifiedNamespace); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(RollingSessionReceiver.class, loggingContext); this.rollerId = rollerId; this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.entityPath = entityPath; this.concurrency = concurrency; this.processError = processError; this.processMessage = processMessage; this.enableAutoDisposition = enableAutoDisposition; this.maxSessionLockRenew = maxSessionLockRenew; this.sessionIdleTimeout = sessionIdleTimeout; this.serializer = serializer; this.instrumentation = instrumentation; this.tracer = instrumentation.getTracer(); this.receiversTracker = receiversTracker; this.nextSessionStream = new NextSessionStream(pumpId, rollerId, fullyQualifiedNamespace, entityPath, nextSession); final Flux<ServiceBusSessionReactorReceiver> nextSessionReceiverStream = nextSessionStream.flux() .map(this::nextSessionReceiver); this.messageFlux = new MessageFlux(nextSessionReceiverStream, prefetch, CreditFlowMode.RequestDriven, retryPolicy); } Mono<Void> begin() { return Mono.usingWhen( Mono.fromSupplier(() -> { final Scheduler workerScheduler; if (concurrency > 1) { workerScheduler = Schedulers.newBoundedElastic( DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "rolling-session-receiver-" + rollerId); } else { workerScheduler = Schedulers.immediate(); } return workerScheduler; }), workerScheduler -> { final RunOnWorker handleMessageOnWorker = new RunOnWorker(this::handleMessage, workerScheduler); return messageFlux.flatMap(handleMessageOnWorker, concurrency, 1).then(); }, (workerScheduler) -> terminate(TerminalSignalType.COMPLETED, workerScheduler), (workerScheduler, e) -> terminate(TerminalSignalType.ERRORED, workerScheduler), (workerScheduler) -> terminate(TerminalSignalType.CANCELED, workerScheduler) ); } private Mono<Void> terminate(TerminalSignalType signalType, Scheduler 
workerScheduler) { final State<ServiceBusSessionReactorReceiver> state = super.getAndSet(TERMINATED); if (state == TERMINATED) { return Mono.empty(); } logger.atInfo().log("Roller terminated. rollerId:" + rollerId + " signal:" + signalType); nextSessionStream.close(); workerScheduler.dispose(); return Mono.empty(); } private ServiceBusSessionReactorReceiver nextSessionReceiver(ServiceBusSessionAcquirer.Session nextSession) { final State<ServiceBusSessionReactorReceiver> lastState = super.get(); if (lastState == TERMINATED) { nextSession.getLink().closeAsync().subscribe(); throw new MessagePumpTerminatedException(pumpId, fullyQualifiedNamespace, entityPath, "session } final ServiceBusSessionReactorReceiver nextSessionReceiver = new ServiceBusSessionReactorReceiver(logger, tracer, nextSession, sessionIdleTimeout, maxSessionLockRenew); if (!super.compareAndSet(lastState, new State<>(nextSessionReceiver))) { nextSessionReceiver.closeAsync().subscribe(); throw new MessagePumpTerminatedException(pumpId, fullyQualifiedNamespace, entityPath, "session } if (lastState != INIT) { final ServiceBusSessionReactorReceiver lastSessionReceiver = lastState.receiver; receiversTracker.untrack(lastSessionReceiver); } receiversTracker.track(nextSessionReceiver); return nextSessionReceiver; } private void handleMessage(Message qpidMessage) { final ServiceBusReceivedMessage message = serializer.deserialize(qpidMessage, ServiceBusReceivedMessage.class); instrumentation.instrumentProcess(message, ReceiverKind.PROCESSOR, msg -> { logger.atVerbose() .addKeyValue(SESSION_ID_KEY, message.getSessionId()) .addKeyValue(MESSAGE_ID_LOGGING_KEY, message.getMessageId()) .log("Received message."); final Throwable error = notifyMessage(msg); if (enableAutoDisposition) { if (error == null) { complete(msg); } else { abandon(msg); } } return error; }); } private Throwable notifyMessage(ServiceBusReceivedMessage message) { try { processMessage.accept( new ServiceBusReceivedMessageContext(receiversTracker, 
new ServiceBusMessageContext(message))); } catch (Exception e) { notifyError(new ServiceBusException(e, ServiceBusErrorSource.USER_CALLBACK)); return e; } return null; } private void notifyError(Throwable throwable) { try { processError.accept(new ServiceBusErrorContext(throwable, fullyQualifiedNamespace, entityPath)); } catch (Exception e) { logger.atVerbose().log("Ignoring error from user processError handler.", e); } } private void complete(ServiceBusReceivedMessage message) { try { receiversTracker.complete(message).block(); } catch (Exception e) { logger.atVerbose().log("Failed to complete message", e); } } private void abandon(ServiceBusReceivedMessage message) { try { receiversTracker.abandon(message).block(); } catch (Exception e) { logger.atVerbose().log("Failed to abandon message", e); } } /** * A type which provide a Flux {@link NextSessionStream * Each {@link RollingSessionReceiver} has a {@link NextSessionStream} instance associated, * when the {@link RollingSessionReceiver} wants to roll to a new session, it requests next session from the Flux. 
* * <p>Underneath, all the {@link NextSessionStream} instances (across all {@link RollingSessionReceiver}) shares * the common Mono {@link NextSession */ private static final class NextSessionStream extends AtomicBoolean { private final long pumpId; private final int rollerId; private final String fullyQualifiedNamespace; private final String entityPath; private final Mono<ServiceBusSessionAcquirer.Session> newSession; NextSessionStream(long pumpId, int rollerId, String fullyQualifiedNamespace, String entityPath, Mono<ServiceBusSessionAcquirer.Session> nextSession) { super(false); this.pumpId = pumpId; this.rollerId = rollerId; this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.entityPath = entityPath; this.newSession = Mono.defer(() -> { final boolean isTerminated = super.get(); if (isTerminated) { return Mono.error(new MessagePumpTerminatedException(this.pumpId, this.fullyQualifiedNamespace, this.entityPath, "session } else { return nextSession; } }).map(session -> { final boolean isTerminated = super.get(); if (isTerminated) { session.getLink().closeAsync().subscribe(); throw new MessagePumpTerminatedException(this.pumpId, this.fullyQualifiedNamespace, this.entityPath, "session } return session; }); } Flux<ServiceBusSessionAcquirer.Session> flux() { return nonEagerRepeat(newSession); } void close() { super.set(true); } private static Flux<ServiceBusSessionAcquirer.Session> nonEagerRepeat(Mono<ServiceBusSessionAcquirer.Session> source) { return source .cacheInvalidateIf(cachedSession -> cachedSession.getLink().isDisposed()) .repeat() .filter(session -> !session.getLink().isDisposed()); } } /** * A Function that when called, invokes {@link Message} handler using a Worker thread from a {@link Scheduler}. 
*/ private static final class RunOnWorker implements Function<Message, Publisher<Void>> { private final Consumer<Message> handleMessage; private final Scheduler workerScheduler; /** * Instantiate {@link RunOnWorker} to run the given {@code handleMessage} handler using a Worker * from the provided {@code workerScheduler}. * * @param handleMessage The message handler. * @param workerScheduler The Scheduler hosting the Worker to run the message handler. */ RunOnWorker(Consumer<Message> handleMessage, Scheduler workerScheduler) { this.handleMessage = handleMessage; this.workerScheduler = workerScheduler; } @Override public Mono<Void> apply(Message qpidMessage) { return Mono.<Void>fromRunnable(() -> { handleMessage.accept(qpidMessage); }).subscribeOn(workerScheduler); } } } /** * Tracks the running {@link ServiceBusSessionReactorReceiver} instances, each backing a RollingSessionReceiver instance. * Each time a RollingSessionReceiver rolls to a new ServiceBusSessionReactorReceiver Rn, it will track Rn by invoking * tack(Rn) and un-tracks the last (closed) ServiceBusSessionReactorReceiver Rm by invoking untrack(Rm). * * <p>The type holds sessionId to ServiceBusSessionReactorReceiver mapping. A session message can be disposition only on * the ServiceBusSessionReactorReceiver delivered it. The mapping tracked by this type enables looking up * the ServiceBusSessionReactorReceiver when a message needs to be disposition.</p> * * <p>It is possible that, a session say session-1 gets acquired by RollingSessionReceiver Ru, * while a RollingSessionReceiver Rv that was previously connected to session-1 rolls to session-2. 
Measures are * taken to ensure the Rv is not removing (un-track) session-1 tracked by Ru from the shared view in such concurrent * case, the underlying {@link ConcurrentHashMap */ static final class SessionReceiversTracker { private final ClientLogger logger; private final String fullyQualifiedNamespace; private final String entityPath; private final ServiceBusReceiveMode receiveMode; private final ConcurrentHashMap<String, ServiceBusSessionReactorReceiver> receivers; private final ServiceBusReceiverInstrumentation instrumentation; private SessionReceiversTracker(ClientLogger logger, int size, String fullyQualifiedNamespace, String entityPath, ServiceBusReceiveMode receiveMode, ServiceBusReceiverInstrumentation instrumentation) { this.logger = logger; this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.entityPath = entityPath; this.receiveMode = receiveMode; this.receivers = new ConcurrentHashMap<>(size); this.instrumentation = instrumentation; } private void track(ServiceBusSessionReactorReceiver receiver) { receivers.put(receiver.getSessionId(), receiver); } private void untrack(ServiceBusSessionReactorReceiver receiver) { receivers.remove(receiver.getSessionId(), receiver); } private void clear() { receivers.clear(); } String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } String getEntityPath() { return entityPath; } Mono<Void> abandon(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.ABANDONED, null, null, null, null); } Mono<Void> complete(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, null); } Mono<Void> deadLetter(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.SUSPENDED, null, null, null, null); } Mono<Void> defer(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.DEFERRED, null, null, null, null); } Mono<Void> abandon(ServiceBusReceivedMessage 
message, AbandonOptions options) { Mono<Void> nullError = checkNull(options, options != null ? options.getTransactionContext() : null); if (nullError != null) { return nullError; } return updateDisposition(message, DispositionStatus.ABANDONED, options.getPropertiesToModify(), null, null, options.getTransactionContext()); } Mono<Void> complete(ServiceBusReceivedMessage message, CompleteOptions options) { Mono<Void> nullError = checkNull(options, options != null ? options.getTransactionContext() : null); if (nullError != null) { return nullError; } return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, options.getTransactionContext()); } Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions options) { Mono<Void> nullError = checkNull(options, options != null ? options.getTransactionContext() : null); if (nullError != null) { return nullError; } return updateDisposition(message, DispositionStatus.SUSPENDED, options.getPropertiesToModify(), options.getDeadLetterReason(), options.getDeadLetterErrorDescription(), options.getTransactionContext()); } Mono<Void> defer(ServiceBusReceivedMessage message, DeferOptions options) { Mono<Void> nullError = checkNull(options, options != null ? 
options.getTransactionContext() : null); if (nullError != null) { return nullError; } return updateDisposition(message, DispositionStatus.DEFERRED, options.getPropertiesToModify(), null, null, options.getTransactionContext()); } private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { if (receiveMode != ServiceBusReceiveMode.PEEK_LOCK) { final String m = String.format("'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus); return Mono.error(new UnsupportedOperationException(m)); } else if (message.isSettled()) { final String m = "The message has either been deleted or already settled."; return Mono.error(new IllegalArgumentException(m)); } else if (message.getLockToken() == null) { final String m = "This operation is not supported for peeked messages. 
" + "Only messages received using receiveMessages() in PEEK_LOCK mode can be settled."; return Mono.error(new UnsupportedOperationException(m)); } final String sessionId = message.getSessionId(); final ServiceBusSessionReactorReceiver receiver = receivers.get(sessionId); final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); Mono<Void> updateDispositionMono; if (receiver != null) { updateDispositionMono = receiver.updateDisposition(message.getLockToken(), deliveryState); } else { updateDispositionMono = Mono.error(DeliveryNotOnLinkException.noMatchingDelivery(message.getLockToken(), deliveryState)); } return instrumentation.instrumentSettlement(updateDispositionMono, message, message.getContext(), dispositionStatus); } private Mono<Void> checkNull(Object options, ServiceBusTransactionContext transactionContext) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (transactionContext != null && transactionContext.getTransactionId() == null) { return monoError(logger, new NullPointerException("'options.transactionContext.transactionId' cannot be null.")); } return null; } } private static final class State<T extends AsyncCloseable> { final T receiver; State(T receiver) { this.receiver = Objects.requireNonNull(receiver); } static <T extends AsyncCloseable> State<T> init() { return new State<>(); } static <T extends AsyncCloseable> State<T> terminated() { return new State<>(); } private State() { this.receiver = null; } } /** * The signal that triggered {@link SessionsMessagePump} and {@link RollingSessionReceiver} termination. */ private enum TerminalSignalType { COMPLETED, ERRORED, CANCELED, } }
class SessionsMessagePump { private static final AtomicLong COUNTER = new AtomicLong(); private static final ArrayList<RollingSessionReceiver> EMPTY = new ArrayList<>(0); private static final ArrayList<RollingSessionReceiver> TERMINATED = new ArrayList<>(0); private static final Duration CONNECTION_STATE_POLL_INTERVAL = Duration.ofSeconds(20); private final long pumpId; private final ClientLogger logger; private final String identifier; private final String fullyQualifiedNamespace; private final String entityPath; private final ServiceBusReceiverInstrumentation instrumentation; private final ServiceBusSessionAcquirer sessionAcquirer; private final Duration maxSessionLockRenew; private final Duration sessionIdleTimeout; private final int maxConcurrentSessions; private final int concurrencyPerSession; private final int prefetch; private final boolean enableAutoDisposition; private final MessageSerializer serializer; private final AmqpRetryPolicy retryPolicy; private final Consumer<ServiceBusReceivedMessageContext> processMessage; private final Consumer<ServiceBusErrorContext> processError; private final Runnable onTerminate; private final AtomicReference<List<RollingSessionReceiver>> rollingReceiversRef = new AtomicReference<>(EMPTY); private final SessionReceiversTracker receiversTracker; private final Mono<ServiceBusSessionAcquirer.Session> nextSession; SessionsMessagePump(String identifier, String fullyQualifiedNamespace, String entityPath, ServiceBusReceiveMode receiveMode, ServiceBusReceiverInstrumentation instrumentation, ServiceBusSessionAcquirer sessionAcquirer, Duration maxSessionLockRenew, Duration sessionIdleTimeout, int maxConcurrentSessions, int concurrencyPerSession, int prefetch, boolean enableAutoDisposition, MessageSerializer serializer, AmqpRetryPolicy retryPolicy, Consumer<ServiceBusReceivedMessageContext> processMessage, Consumer<ServiceBusErrorContext> processError, Runnable onTerminate) { this.pumpId = COUNTER.incrementAndGet(); final 
Map<String, Object> loggingContext = new HashMap<>(3); loggingContext.put(PUMP_ID_KEY, pumpId); loggingContext.put(FULLY_QUALIFIED_NAMESPACE_KEY, fullyQualifiedNamespace); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(SessionsMessagePump.class, loggingContext); this.identifier = identifier; this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); Objects.requireNonNull(receiveMode, "'receiveMode' cannot be null."); this.instrumentation = Objects.requireNonNull(instrumentation, "'instrumentation' cannot be null"); this.sessionAcquirer = Objects.requireNonNull(sessionAcquirer, "'sessionAcquirer' cannot be null"); this.maxSessionLockRenew = Objects.requireNonNull(maxSessionLockRenew, "'maxSessionLockRenew' cannot be null."); this.sessionIdleTimeout = sessionIdleTimeout != null ? sessionIdleTimeout : retryPolicy.getRetryOptions().getTryTimeout(); this.maxConcurrentSessions = maxConcurrentSessions; this.concurrencyPerSession = concurrencyPerSession; this.prefetch = prefetch; this.enableAutoDisposition = enableAutoDisposition; this.serializer = Objects.requireNonNull(serializer, "'serializer' cannot be null."); this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null."); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null."); this.onTerminate = Objects.requireNonNull(onTerminate, "'onTerminate' cannot be null."); this.receiversTracker = new SessionReceiversTracker(logger, maxConcurrentSessions, fullyQualifiedNamespace, entityPath, receiveMode, instrumentation); this.nextSession = new NextSession(pumpId, fullyQualifiedNamespace, entityPath, sessionAcquirer).mono(); } String getIdentifier() { return identifier; } /** * Obtain a 
Mono that when subscribed, start pumping messages from {@code maxConcurrentSessions} sessions. * The Mono emits terminal signal once there is a failure in obtaining a new session. * * <p>The Mono emits {@link UnsupportedOperationException} if it is subscribed more than once. If the Mono is subscribed * after the termination of SessionsMessagePump it emits {@link MessagePumpTerminatedException}.</p> * * @return the Mono to begin and cancel message pumping. */ Mono<Void> begin() { logCPUResourcesConcurrencyMismatch(); final Mono<List<RollingSessionReceiver>> createReceiversMono = Mono.fromSupplier(() -> { throwIfTerminatedOrInitialized(); final List<RollingSessionReceiver> rollingReceivers = createRollingSessionReceivers(); if (!rollingReceiversRef.compareAndSet(EMPTY, rollingReceivers)) { rollingReceivers.clear(); throwIfTerminatedOrInitialized(); } return rollingReceivers; }); final Function<List<RollingSessionReceiver>, Mono<Void>> pumpFromReceiversMono = rollingReceivers -> { final List<Mono<Void>> pumpingList = new ArrayList<>(rollingReceivers.size()); for (RollingSessionReceiver rollingReceiver : rollingReceivers) { pumpingList.add(rollingReceiver.begin()); } final Mono<Void> terminatePumping = pollConnectionState(); final Mono<Void> pumping = Mono.when(pumpingList); return Mono.firstWithSignal(terminatePumping, pumping); }; final Mono<Void> pumpingMessages = Mono.usingWhen(createReceiversMono, pumpFromReceiversMono, (__) -> terminate(TerminalSignalType.COMPLETED), (__, e) -> terminate(TerminalSignalType.ERRORED), (__) -> terminate(TerminalSignalType.CANCELED)); return pumpingMessages .onErrorMap(e -> { if (e instanceof MessagePumpTerminatedException) { return e; } return new MessagePumpTerminatedException(pumpId, fullyQualifiedNamespace, entityPath, "pumping }) .then(Mono.error(() -> MessagePumpTerminatedException.forCompletion(pumpId, fullyQualifiedNamespace, entityPath))); } private Mono<Void> pollConnectionState() { return 
Flux.interval(CONNECTION_STATE_POLL_INTERVAL) .handle((ignored, sink) -> { if (sessionAcquirer.isConnectionClosed()) { final RuntimeException e = logger.atInfo() .log(new MessagePumpTerminatedException(pumpId, fullyQualifiedNamespace, entityPath, "session sink.error(e); } }).then(); } private Mono<Void> terminate(TerminalSignalType signalType) { final List<RollingSessionReceiver> rollingReceivers = rollingReceiversRef.getAndSet(TERMINATED); if (rollingReceivers == TERMINATED) { return Mono.empty(); } logger.atInfo().log("Pump terminated. signal:" + signalType); receiversTracker.clear(); onTerminate.run(); return Mono.empty(); } private List<RollingSessionReceiver> createRollingSessionReceivers() { final ArrayList<RollingSessionReceiver> rollingReceivers = new ArrayList<>(maxConcurrentSessions); for (int rollerId = 1; rollerId <= maxConcurrentSessions; rollerId++) { final RollingSessionReceiver rollingReceiver = new RollingSessionReceiver(pumpId, rollerId, instrumentation, fullyQualifiedNamespace, entityPath, nextSession, maxSessionLockRenew, sessionIdleTimeout, concurrencyPerSession, prefetch, enableAutoDisposition, serializer, retryPolicy, processMessage, processError, receiversTracker); rollingReceivers.add(rollingReceiver); } return rollingReceivers; } private void throwIfTerminatedOrInitialized() { final List<RollingSessionReceiver> l = rollingReceiversRef.get(); if (l == TERMINATED) { throw logger.atVerbose().log(new IllegalStateException("Cannot invoke begin() once terminated.")); } if (l != EMPTY) { throw logger.atVerbose().log(new IllegalStateException("Cannot invoke begin() more than once.")); } } /** * The type which provides a Mono {@link NextSession * All the {@link RollingSessionReceiver} in the {@link SessionsMessagePump} shares this Mono to obtain unique sessions. 
* * <p>The event the Mono fails to acquire a session, the type marks itself as terminated (i.e., self-terminate) and * notifies the subscription about the failure as {@link MessagePumpTerminatedException}. Any later subscriptions will * be notified with MessagePumpTerminatedException.</p> * * <p>If a RollingSessionReceiver encounters acquire session failure, it stops pumping and emit terminal signal. * At this point, the SessionMessagePump will cancel all other RollingSessionReceiver instances. The design of * the SessionMessagePump is to stop pumping from all sessions once any of the RollingSessionReceiver emits terminal * signal. The self-terminating nature of the shared {@link NextSession * more of the RollingSessionReceiver attempting session acquire when SessionMessagePump is about to or in progress * of canceling those.</p> */ private static final class NextSession implements Supplier<Mono<ServiceBusSessionAcquirer.Session>> { private final AtomicReference<Boolean> isTerminated = new AtomicReference<>(false); private final long pumpId; private final String fullyQualifiedNamespace; private final String entityPath; private final ServiceBusSessionAcquirer sessionAcquirer; NextSession(long pumpId, String fullyQualifiedNamespace, String entityPath, ServiceBusSessionAcquirer sessionAcquirer) { this.pumpId = pumpId; this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.entityPath = entityPath; this.sessionAcquirer = sessionAcquirer; } Mono<ServiceBusSessionAcquirer.Session> mono() { final Supplier<Mono<ServiceBusSessionAcquirer.Session>> supplier = this; return Mono.defer(supplier); } @Override public Mono<ServiceBusSessionAcquirer.Session> get() { if (isTerminated.get()) { return Mono.error(new MessagePumpTerminatedException(pumpId, fullyQualifiedNamespace, entityPath, "session } return sessionAcquirer.acquire() .onErrorMap(e -> { isTerminated.set(true); return new MessagePumpTerminatedException(pumpId, fullyQualifiedNamespace, entityPath, "session }); } } 
/** * A type that is responsible for managing a session, concurrently (with parallelism equal to {@code concurrencyPerSession}) * pumping messages from the session and rolling to the next session when current session terminates. */ private static final class RollingSessionReceiver extends AtomicReference<State<ServiceBusSessionReactorReceiver>> { private static final String ROLLER_ID_KEY = "roller-id"; private static final State<ServiceBusSessionReactorReceiver> INIT = State.init(); private static final State<ServiceBusSessionReactorReceiver> TERMINATED = State.terminated(); private final ClientLogger logger; private final long pumpId; private final int rollerId; private final String fullyQualifiedNamespace; private final String entityPath; private final int concurrency; private final Consumer<ServiceBusReceivedMessageContext> processMessage; private final Consumer<ServiceBusErrorContext> processError; private final boolean enableAutoDisposition; private final Duration maxSessionLockRenew; private final Duration sessionIdleTimeout; private final MessageSerializer serializer; private final ServiceBusReceiverInstrumentation instrumentation; private final ServiceBusTracer tracer; private final SessionReceiversTracker receiversTracker; private final NextSessionStream nextSessionStream; private final MessageFlux messageFlux; RollingSessionReceiver(long pumpId, int rollerId, ServiceBusReceiverInstrumentation instrumentation, String fullyQualifiedNamespace, String entityPath, Mono<ServiceBusSessionAcquirer.Session> nextSession, Duration maxSessionLockRenew, Duration sessionIdleTimeout, int concurrency, int prefetch, boolean enableAutoDisposition, MessageSerializer serializer, AmqpRetryPolicy retryPolicy, Consumer<ServiceBusReceivedMessageContext> processMessage, Consumer<ServiceBusErrorContext> processError, SessionReceiversTracker receiversTracker) { super(INIT); this.pumpId = pumpId; final Map<String, Object> loggingContext = new HashMap<>(3); 
loggingContext.put(ROLLER_ID_KEY, rollerId); loggingContext.put(FULLY_QUALIFIED_NAMESPACE_KEY, fullyQualifiedNamespace); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(RollingSessionReceiver.class, loggingContext); this.rollerId = rollerId; this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.entityPath = entityPath; this.concurrency = concurrency; this.processError = processError; this.processMessage = processMessage; this.enableAutoDisposition = enableAutoDisposition; this.maxSessionLockRenew = maxSessionLockRenew; this.sessionIdleTimeout = sessionIdleTimeout; this.serializer = serializer; this.instrumentation = instrumentation; this.tracer = instrumentation.getTracer(); this.receiversTracker = receiversTracker; this.nextSessionStream = new NextSessionStream(pumpId, rollerId, fullyQualifiedNamespace, entityPath, nextSession); final Flux<ServiceBusSessionReactorReceiver> nextSessionReceiverStream = nextSessionStream.flux() .map(this::nextSessionReceiver); this.messageFlux = new MessageFlux(nextSessionReceiverStream, prefetch, CreditFlowMode.RequestDriven, retryPolicy); } Mono<Void> begin() { return Mono.usingWhen( Mono.fromSupplier(() -> { final Scheduler workerScheduler; if (concurrency > 1) { workerScheduler = Schedulers.newBoundedElastic( DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "rolling-session-receiver-" + rollerId); } else { workerScheduler = Schedulers.immediate(); } return workerScheduler; }), workerScheduler -> { final RunOnWorker handleMessageOnWorker = new RunOnWorker(this::handleMessage, workerScheduler); return messageFlux.flatMap(handleMessageOnWorker, concurrency, 1).then(); }, (workerScheduler) -> terminate(TerminalSignalType.COMPLETED, workerScheduler), (workerScheduler, e) -> terminate(TerminalSignalType.ERRORED, workerScheduler), (workerScheduler) -> terminate(TerminalSignalType.CANCELED, workerScheduler) ); } private Mono<Void> terminate(TerminalSignalType signalType, Scheduler 
workerScheduler) { final State<ServiceBusSessionReactorReceiver> state = super.getAndSet(TERMINATED); if (state == TERMINATED) { return Mono.empty(); } logger.atInfo().log("Roller terminated. rollerId:" + rollerId + " signal:" + signalType); nextSessionStream.close(); workerScheduler.dispose(); return Mono.empty(); } private ServiceBusSessionReactorReceiver nextSessionReceiver(ServiceBusSessionAcquirer.Session nextSession) { final State<ServiceBusSessionReactorReceiver> lastState = super.get(); if (lastState == TERMINATED) { nextSession.getLink().closeAsync().subscribe(); throw new MessagePumpTerminatedException(pumpId, fullyQualifiedNamespace, entityPath, "session } final ServiceBusSessionReactorReceiver nextSessionReceiver = new ServiceBusSessionReactorReceiver(logger, tracer, nextSession, sessionIdleTimeout, maxSessionLockRenew); if (!super.compareAndSet(lastState, new State<>(nextSessionReceiver))) { nextSessionReceiver.closeAsync().subscribe(); throw new MessagePumpTerminatedException(pumpId, fullyQualifiedNamespace, entityPath, "session } if (lastState != INIT) { final ServiceBusSessionReactorReceiver lastSessionReceiver = lastState.receiver; receiversTracker.untrack(lastSessionReceiver); } receiversTracker.track(nextSessionReceiver); return nextSessionReceiver; } private void handleMessage(Message qpidMessage) { final ServiceBusReceivedMessage message = serializer.deserialize(qpidMessage, ServiceBusReceivedMessage.class); instrumentation.instrumentProcess(message, ReceiverKind.PROCESSOR, msg -> { logger.atVerbose() .addKeyValue(SESSION_ID_KEY, message.getSessionId()) .addKeyValue(MESSAGE_ID_LOGGING_KEY, message.getMessageId()) .log("Received message."); final Throwable error = notifyMessage(msg); if (enableAutoDisposition) { if (error == null) { complete(msg); } else { abandon(msg); } } return error; }); } private Throwable notifyMessage(ServiceBusReceivedMessage message) { try { processMessage.accept( new ServiceBusReceivedMessageContext(receiversTracker, 
new ServiceBusMessageContext(message))); } catch (Exception e) { notifyError(new ServiceBusException(e, ServiceBusErrorSource.USER_CALLBACK)); return e; } return null; } private void notifyError(Throwable throwable) { try { processError.accept(new ServiceBusErrorContext(throwable, fullyQualifiedNamespace, entityPath)); } catch (Exception e) { logger.atVerbose().log("Ignoring error from user processError handler.", e); } } private void complete(ServiceBusReceivedMessage message) { try { receiversTracker.complete(message).block(); } catch (Exception e) { logger.atVerbose().log("Failed to complete message", e); } } private void abandon(ServiceBusReceivedMessage message) { try { receiversTracker.abandon(message).block(); } catch (Exception e) { logger.atVerbose().log("Failed to abandon message", e); } } /** * A type which provide a Flux {@link NextSessionStream * Each {@link RollingSessionReceiver} has a {@link NextSessionStream} instance associated, * when the {@link RollingSessionReceiver} wants to roll to a new session, it requests next session from the Flux. 
* * <p>Underneath, all the {@link NextSessionStream} instances (across all {@link RollingSessionReceiver}) shares * the common Mono {@link NextSession */ private static final class NextSessionStream extends AtomicBoolean { private final long pumpId; private final int rollerId; private final String fullyQualifiedNamespace; private final String entityPath; private final Mono<ServiceBusSessionAcquirer.Session> newSession; NextSessionStream(long pumpId, int rollerId, String fullyQualifiedNamespace, String entityPath, Mono<ServiceBusSessionAcquirer.Session> nextSession) { super(false); this.pumpId = pumpId; this.rollerId = rollerId; this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.entityPath = entityPath; this.newSession = Mono.defer(() -> { final boolean isTerminated = super.get(); if (isTerminated) { return Mono.error(new MessagePumpTerminatedException(this.pumpId, this.fullyQualifiedNamespace, this.entityPath, "session } else { return nextSession; } }).map(session -> { final boolean isTerminated = super.get(); if (isTerminated) { session.getLink().closeAsync().subscribe(); throw new MessagePumpTerminatedException(this.pumpId, this.fullyQualifiedNamespace, this.entityPath, "session } return session; }); } Flux<ServiceBusSessionAcquirer.Session> flux() { return nonEagerRepeat(newSession); } void close() { super.set(true); } private static Flux<ServiceBusSessionAcquirer.Session> nonEagerRepeat(Mono<ServiceBusSessionAcquirer.Session> source) { return source .cacheInvalidateIf(cachedSession -> cachedSession.getLink().isDisposed()) .repeat() .filter(session -> !session.getLink().isDisposed()); } } /** * A Function that when called, invokes {@link Message} handler using a Worker thread from a {@link Scheduler}. 
*/ private static final class RunOnWorker implements Function<Message, Publisher<Void>> { private final Consumer<Message> handleMessage; private final Scheduler workerScheduler; /** * Instantiate {@link RunOnWorker} to run the given {@code handleMessage} handler using a Worker * from the provided {@code workerScheduler}. * * @param handleMessage The message handler. * @param workerScheduler The Scheduler hosting the Worker to run the message handler. */ RunOnWorker(Consumer<Message> handleMessage, Scheduler workerScheduler) { this.handleMessage = handleMessage; this.workerScheduler = workerScheduler; } @Override public Mono<Void> apply(Message qpidMessage) { return Mono.<Void>fromRunnable(() -> { handleMessage.accept(qpidMessage); }).subscribeOn(workerScheduler); } } } /** * Tracks the running {@link ServiceBusSessionReactorReceiver} instances, each backing a RollingSessionReceiver instance. * Each time a RollingSessionReceiver rolls to a new ServiceBusSessionReactorReceiver Rn, it will track Rn by invoking * tack(Rn) and un-tracks the last (closed) ServiceBusSessionReactorReceiver Rm by invoking untrack(Rm). * * <p>The type holds sessionId to ServiceBusSessionReactorReceiver mapping. A session message can be disposition only on * the ServiceBusSessionReactorReceiver delivered it. The mapping tracked by this type enables looking up * the ServiceBusSessionReactorReceiver when a message needs to be disposition.</p> * * <p>It is possible that, a session say session-1 gets acquired by RollingSessionReceiver Ru, * while a RollingSessionReceiver Rv that was previously connected to session-1 rolls to session-2. 
Measures are * taken to ensure the Rv is not removing (un-track) session-1 tracked by Ru from the shared view in such concurrent * case, the underlying {@link ConcurrentHashMap */ static final class SessionReceiversTracker { private final ClientLogger logger; private final String fullyQualifiedNamespace; private final String entityPath; private final ServiceBusReceiveMode receiveMode; private final ConcurrentHashMap<String, ServiceBusSessionReactorReceiver> receivers; private final ServiceBusReceiverInstrumentation instrumentation; private SessionReceiversTracker(ClientLogger logger, int size, String fullyQualifiedNamespace, String entityPath, ServiceBusReceiveMode receiveMode, ServiceBusReceiverInstrumentation instrumentation) { this.logger = logger; this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.entityPath = entityPath; this.receiveMode = receiveMode; this.receivers = new ConcurrentHashMap<>(size); this.instrumentation = instrumentation; } private void track(ServiceBusSessionReactorReceiver receiver) { receivers.put(receiver.getSessionId(), receiver); } private void untrack(ServiceBusSessionReactorReceiver receiver) { receivers.remove(receiver.getSessionId(), receiver); } private void clear() { receivers.clear(); } String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } String getEntityPath() { return entityPath; } Mono<Void> abandon(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.ABANDONED, null, null, null, null); } Mono<Void> complete(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, null); } Mono<Void> deadLetter(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.SUSPENDED, null, null, null, null); } Mono<Void> defer(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.DEFERRED, null, null, null, null); } Mono<Void> abandon(ServiceBusReceivedMessage 
message, AbandonOptions options) { Mono<Void> nullError = checkNull(options, options != null ? options.getTransactionContext() : null); if (nullError != null) { return nullError; } return updateDisposition(message, DispositionStatus.ABANDONED, options.getPropertiesToModify(), null, null, options.getTransactionContext()); } Mono<Void> complete(ServiceBusReceivedMessage message, CompleteOptions options) { Mono<Void> nullError = checkNull(options, options != null ? options.getTransactionContext() : null); if (nullError != null) { return nullError; } return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, options.getTransactionContext()); } Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions options) { Mono<Void> nullError = checkNull(options, options != null ? options.getTransactionContext() : null); if (nullError != null) { return nullError; } return updateDisposition(message, DispositionStatus.SUSPENDED, options.getPropertiesToModify(), options.getDeadLetterReason(), options.getDeadLetterErrorDescription(), options.getTransactionContext()); } Mono<Void> defer(ServiceBusReceivedMessage message, DeferOptions options) { Mono<Void> nullError = checkNull(options, options != null ? 
options.getTransactionContext() : null); if (nullError != null) { return nullError; } return updateDisposition(message, DispositionStatus.DEFERRED, options.getPropertiesToModify(), null, null, options.getTransactionContext()); } private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { if (receiveMode != ServiceBusReceiveMode.PEEK_LOCK) { final String m = String.format("'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus); return Mono.error(new UnsupportedOperationException(m)); } else if (message.isSettled()) { final String m = "The message has either been deleted or already settled."; return Mono.error(new IllegalArgumentException(m)); } else if (message.getLockToken() == null) { final String m = "This operation is not supported for peeked messages. 
" + "Only messages received using receiveMessages() in PEEK_LOCK mode can be settled."; return Mono.error(new UnsupportedOperationException(m)); } final String sessionId = message.getSessionId(); final ServiceBusSessionReactorReceiver receiver = receivers.get(sessionId); final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); Mono<Void> updateDispositionMono; if (receiver != null) { updateDispositionMono = receiver.updateDisposition(message.getLockToken(), deliveryState); } else { updateDispositionMono = Mono.error(DeliveryNotOnLinkException.noMatchingDelivery(message.getLockToken(), deliveryState)); } return instrumentation.instrumentSettlement(updateDispositionMono, message, message.getContext(), dispositionStatus); } private Mono<Void> checkNull(Object options, ServiceBusTransactionContext transactionContext) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } if (transactionContext != null && transactionContext.getTransactionId() == null) { return monoError(logger, new NullPointerException("'options.transactionContext.transactionId' cannot be null.")); } return null; } } private static final class State<T extends AsyncCloseable> { final T receiver; State(T receiver) { this.receiver = Objects.requireNonNull(receiver); } static <T extends AsyncCloseable> State<T> init() { return new State<>(); } static <T extends AsyncCloseable> State<T> terminated() { return new State<>(); } private State() { this.receiver = null; } } /** * The signal that triggered {@link SessionsMessagePump} and {@link RollingSessionReceiver} termination. */ private enum TerminalSignalType { COMPLETED, ERRORED, CANCELED, } }
What if, every time we refresh the locations, we also updated `preferredLocations` inside the `locationInfo` to be the effective preferred locations? Then we might not need the specialized handling for the case where `connectionPolicy.preferredRegions` is empty, in this method and the following ones.
/**
 * Decides whether the cached account topology (per-region read/write endpoints) should be refreshed.
 *
 * <p>A refresh is requested when the first read/write endpoint is marked unavailable, or when the
 * endpoint currently in first position does not match the endpoint of the most-preferred region.
 * When the client configured no preferred regions, the "effective" preference is derived from the
 * account-level first read/write endpoint's region name.</p>
 *
 * @param canRefreshInBackground out-parameter; set to false when no alternative endpoint is available,
 *        signaling the caller that the refresh should block rather than run in the background.
 * @return true if the location cache should be refreshed.
 */
public boolean shouldRefreshEndpoints(Utils.ValueHolder<Boolean> canRefreshInBackground) {
    // Assume a background refresh is possible until an unavailability check proves otherwise.
    canRefreshInBackground.v = true;
    // Snapshot: this.locationInfo may be swapped concurrently by a completed refresh.
    DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo;
    String mostPreferredLocation = Utils.firstOrDefault(currentLocationInfo.preferredLocations);
    String mostPreferredEffectiveReadLocation = StringUtils.EMPTY;
    String mostPreferredEffectiveWriteLocation = StringUtils.EMPTY;
    if (StringUtils.isEmpty(mostPreferredLocation)) {
        // No client-configured preferred regions: derive effective preferences from the region names
        // of the account-level first read and write endpoints.
        URI firstAccountLevelReadEndpoint = Utils.firstOrDefault(currentLocationInfo.readEndpoints);
        if (currentLocationInfo.regionNameByReadEndpoint != null) {
            mostPreferredEffectiveReadLocation
                = currentLocationInfo.regionNameByReadEndpoint.getOrDefault(firstAccountLevelReadEndpoint, StringUtils.EMPTY);
        }
        URI firstAccountLevelWriteEndpoint = Utils.firstOrDefault(currentLocationInfo.writeEndpoints);
        if (currentLocationInfo.regionNameByWriteEndpoint != null) {
            mostPreferredEffectiveWriteLocation
                = currentLocationInfo.regionNameByWriteEndpoint.getOrDefault(firstAccountLevelWriteEndpoint, StringUtils.EMPTY);
        }
    }
    // Use the explicit preference when present, otherwise the derived effective one.
    String mostPreferredReadLocationToUse = !Strings.isNullOrEmpty(mostPreferredLocation)
        ? mostPreferredLocation : mostPreferredEffectiveReadLocation;
    String mostPreferredWriteLocationToUse = !Strings.isNullOrEmpty(mostPreferredLocation)
        ? mostPreferredLocation : mostPreferredEffectiveWriteLocation;
    if (this.enableEndpointDiscovery) {
        // Multi-write requested by the client but not (yet) enabled on the account: keep refreshing.
        boolean shouldRefresh = this.useMultipleWriteLocations && !this.enableMultipleWriteLocations;
        List<URI> readLocationEndpoints = currentLocationInfo.readEndpoints;
        if (this.isEndpointUnavailable(readLocationEndpoints.get(0), OperationType.Read)) {
            // First read endpoint is down; background refresh only if some other read endpoint is up.
            canRefreshInBackground.v = anyEndpointsAvailable(readLocationEndpoints, OperationType.Read);
            logger.debug("shouldRefreshEndpoints = true, since the first read endpoint "
                + "[{}] is not available for read. canRefreshInBackground = [{}]",
                readLocationEndpoints.get(0), canRefreshInBackground.v);
            return true;
        }
        if (!Strings.isNullOrEmpty(mostPreferredReadLocationToUse)) {
            Utils.ValueHolder<URI> mostPreferredReadEndpointHolder = new Utils.ValueHolder<>();
            Utils.ValueHolder<URI> firstAccountLevelReadEndpointHolder = new Utils.ValueHolder<>();
            logger.debug("getReadEndpoints [{}]", readLocationEndpoints);
            if (Utils.tryGetValue(currentLocationInfo.availableReadEndpointByLocation,
                mostPreferredReadLocationToUse, mostPreferredReadEndpointHolder)) {
                logger.debug("most preferred is [{}], most preferred available is [{}]",
                    mostPreferredReadLocationToUse, mostPreferredReadEndpointHolder.v);
                if (!areEqual(mostPreferredReadEndpointHolder.v, readLocationEndpoints.get(0))) {
                    logger.debug("shouldRefreshEndpoints = true, most preferred location [{}]"
                        + " is not available for read.", mostPreferredReadLocationToUse);
                    return true;
                } else if (currentLocationInfo.preferredLocations == null
                    || currentLocationInfo.preferredLocations.isEmpty()) {
                    // Empty preferredRegions: additionally compare against the first account-level read region,
                    // since the effective preference may drift from the account ordering.
                    List<String> accountLevelReadRegions = currentLocationInfo.availableReadLocations;
                    String firstAccountLevelReadRegion = Utils.firstOrDefault(accountLevelReadRegions);
                    if (Utils.tryGetValue(currentLocationInfo.availableReadEndpointByLocation,
                        firstAccountLevelReadRegion, firstAccountLevelReadEndpointHolder)) {
                        if (!areEqual(mostPreferredReadEndpointHolder.v, firstAccountLevelReadEndpointHolder.v)) {
                            logger.debug("shouldRefreshEndpoints = true, preferredRegions on client is empty and first account-level read region [{}]"
                                + " is not available for read.", firstAccountLevelReadRegion);
                            return true;
                        }
                    }
                }
                // NOTE(review): logs 'mostPreferredLocation' (possibly empty) rather than
                // 'mostPreferredReadLocationToUse' — log-only inconsistency, confirm intent.
                logger.debug("most preferred is [{}], and most preferred available [{}] are the same",
                    mostPreferredLocation, mostPreferredReadEndpointHolder.v);
            } else {
                logger.debug("shouldRefreshEndpoints = true, most preferred location [{}] "
                    + "is not in available read locations.", mostPreferredLocation);
                return true;
            }
        }
        Utils.ValueHolder<URI> mostPreferredWriteEndpointHolder = new Utils.ValueHolder<>();
        List<URI> writeLocationEndpoints = currentLocationInfo.writeEndpoints;
        logger.debug("getWriteEndpoints [{}]", writeLocationEndpoints);
        if (!this.canUseMultipleWriteLocations()) {
            // Single-write account: only the first write endpoint matters.
            if (this.isEndpointUnavailable(writeLocationEndpoints.get(0), OperationType.Write)) {
                canRefreshInBackground.v = anyEndpointsAvailable(writeLocationEndpoints, OperationType.Write);
                logger.debug("shouldRefreshEndpoints = true, most preferred location "
                    + "[{}] endpoint [{}] is not available for write. canRefreshInBackground = [{}]",
                    mostPreferredLocation, writeLocationEndpoints.get(0), canRefreshInBackground.v);
                return true;
            } else {
                logger.debug("shouldRefreshEndpoints: false, [{}] is available for Write",
                    writeLocationEndpoints.get(0));
                return shouldRefresh;
            }
        } else if (!Strings.isNullOrEmpty(mostPreferredWriteLocationToUse)) {
            if (Utils.tryGetValue(currentLocationInfo.availableWriteEndpointByLocation,
                mostPreferredWriteLocationToUse, mostPreferredWriteEndpointHolder)) {
                // Refresh when the first write endpoint differs from the preferred region's endpoint.
                shouldRefresh = !areEqual(mostPreferredWriteEndpointHolder.v, writeLocationEndpoints.get(0));
                if (currentLocationInfo.preferredLocations == null
                    || currentLocationInfo.preferredLocations.isEmpty()) {
                    // Empty preferredRegions: also compare against the first account-level write region.
                    List<String> accountLevelWriteRegions = currentLocationInfo.availableWriteLocations;
                    String firstAccountLevelWriteRegion = Utils.firstOrDefault(accountLevelWriteRegions);
                    Utils.ValueHolder<URI> firstAccountLevelWriteEndpointHolder = new Utils.ValueHolder<>();
                    if (Utils.tryGetValue(currentLocationInfo.availableWriteEndpointByLocation,
                        firstAccountLevelWriteRegion, firstAccountLevelWriteEndpointHolder)) {
                        if (!areEqual(mostPreferredWriteEndpointHolder.v, firstAccountLevelWriteEndpointHolder.v)) {
                            logger.debug("shouldRefreshEndpoints = true, preferredRegions on client is empty and first account-level write region [{}]"
                                + " is not available for write.", firstAccountLevelWriteRegion);
                            shouldRefresh = true;
                        }
                    }
                }
                if (shouldRefresh) {
                    logger.debug("shouldRefreshEndpoints: true, write endpoint [{}] is not the same as most preferred [{}]",
                        writeLocationEndpoints.get(0), mostPreferredWriteEndpointHolder.v);
                } else {
                    logger.debug("shouldRefreshEndpoints: false, write endpoint [{}] is the same as most preferred [{}]",
                        writeLocationEndpoints.get(0), mostPreferredWriteEndpointHolder.v);
                }
                return shouldRefresh;
            } else {
                logger.debug("shouldRefreshEndpoints = true, most preferred location [{}] is not in available write locations",
                    mostPreferredLocation);
                return true;
            }
        } else {
            logger.debug("shouldRefreshEndpoints: false, mostPreferredLocation [{}] is empty",
                mostPreferredLocation);
            return shouldRefresh;
        }
    } else {
        // Endpoint discovery disabled: the cache is never refreshed.
        logger.debug("shouldRefreshEndpoints: false, endpoint discovery not enabled");
        return false;
    }
}
String mostPreferredEffectiveReadLocation = StringUtils.EMPTY;
/**
 * Decides whether the client should refresh its cached view of the account's regional
 * endpoints.
 *
 * Returns {@code true} (refresh needed) when:
 * <ul>
 *   <li>the first read endpoint is currently marked unavailable for reads;</li>
 *   <li>the most preferred location's read endpoint is not first in the read list, or the
 *       preferred location is missing from the available read endpoints;</li>
 *   <li>single-write-region mode: the first write endpoint is marked unavailable for writes;</li>
 *   <li>multi-write mode: the preferred location's write endpoint is not first in the write
 *       list, or the preferred location is missing from the available write endpoints.</li>
 * </ul>
 * Otherwise it returns {@code shouldRefresh}, which is {@code true} when the client wants
 * multiple write locations ({@code useMultipleWriteLocations}) but the account has not
 * enabled them yet ({@code !enableMultipleWriteLocations}).
 *
 * @param canRefreshInBackground out-parameter; set to {@code false} only when NO endpoint in
 *        the relevant (read or write) list is available, i.e. the refresh should not be
 *        deferred to a background task. Initialized to {@code true} on entry.
 * @return whether an endpoint refresh is needed
 */
public boolean shouldRefreshEndpoints(Utils.ValueHolder<Boolean> canRefreshInBackground) {
    canRefreshInBackground.v = true;
    // Snapshot the locations info: the field may be swapped by updateLocationCache() concurrently.
    DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo;
    String mostPreferredLocation = Utils.firstOrDefault(currentLocationInfo.preferredLocations);
    if (StringUtils.isEmpty(mostPreferredLocation)) {
        // No client-configured preferred regions; fall back to the effective preferred list.
        mostPreferredLocation = Utils.firstOrDefault(currentLocationInfo.effectivePreferredLocations);
    }

    if (this.enableEndpointDiscovery) {
        // Default answer when no stronger condition triggers: refresh while the client wants
        // multi-write but the account has not (yet) reported it as enabled.
        boolean shouldRefresh = this.useMultipleWriteLocations && !this.enableMultipleWriteLocations;

        List<URI> readLocationEndpoints = currentLocationInfo.readEndpoints;

        // First read endpoint unavailable -> must refresh; background-only if some other
        // read endpoint is still available.
        if (this.isEndpointUnavailable(readLocationEndpoints.get(0), OperationType.Read)) {
            canRefreshInBackground.v = anyEndpointsAvailable(readLocationEndpoints, OperationType.Read);
            logger.debug("shouldRefreshEndpoints = true, since the first read endpoint " +
                    "[{}] is not available for read. canRefreshInBackground = [{}]",
                readLocationEndpoints.get(0),
                canRefreshInBackground.v);

            return true;
        }

        if (!Strings.isNullOrEmpty(mostPreferredLocation)) {
            Utils.ValueHolder<URI> mostPreferredReadEndpointHolder = new Utils.ValueHolder<>();
            logger.debug("getReadEndpoints [{}]", readLocationEndpoints);

            if (Utils.tryGetValue(currentLocationInfo.availableReadEndpointByLocation,
                    mostPreferredLocation, mostPreferredReadEndpointHolder)) {
                logger.debug("most preferred is [{}], most preferred available is [{}]",
                    mostPreferredLocation, mostPreferredReadEndpointHolder.v);

                // Preferred region's endpoint should be first in the effective read list.
                if (!areEqual(mostPreferredReadEndpointHolder.v, readLocationEndpoints.get(0))) {
                    logger.debug("shouldRefreshEndpoints = true, most preferred location [{}]" +
                        " is not available for read.", mostPreferredLocation);
                    return true;
                }

                logger.debug("most preferred is [{}], and most preferred available [{}] are the same",
                    mostPreferredLocation, mostPreferredReadEndpointHolder.v);
            } else {
                logger.debug("shouldRefreshEndpoints = true, most preferred location [{}] " +
                    "is not in available read locations.", mostPreferredLocation);
                return true;
            }
        }

        Utils.ValueHolder<URI> mostPreferredWriteEndpointHolder = new Utils.ValueHolder<>();
        List<URI> writeLocationEndpoints = currentLocationInfo.writeEndpoints;
        logger.debug("getWriteEndpoints [{}]", writeLocationEndpoints);

        if (!this.canUseMultipleWriteLocations()) {
            // Single-write-region mode: only the first write endpoint matters.
            if (this.isEndpointUnavailable(writeLocationEndpoints.get(0), OperationType.Write)) {
                canRefreshInBackground.v = anyEndpointsAvailable(writeLocationEndpoints, OperationType.Write);
                logger.debug("shouldRefreshEndpoints = true, most preferred location " +
                        "[{}] endpoint [{}] is not available for write. canRefreshInBackground = [{}]",
                    mostPreferredLocation,
                    writeLocationEndpoints.get(0),
                    canRefreshInBackground.v);

                return true;
            } else {
                logger.debug("shouldRefreshEndpoints: false, [{}] is available for Write", writeLocationEndpoints.get(0));
                return shouldRefresh;
            }
        } else if (!Strings.isNullOrEmpty(mostPreferredLocation)) {
            // Multi-write mode: preferred region's write endpoint should be first in the list.
            if (Utils.tryGetValue(currentLocationInfo.availableWriteEndpointByLocation,
                    mostPreferredLocation, mostPreferredWriteEndpointHolder)) {
                shouldRefresh = !areEqual(mostPreferredWriteEndpointHolder.v, writeLocationEndpoints.get(0));

                if (shouldRefresh) {
                    logger.debug("shouldRefreshEndpoints: true, write endpoint [{}] is not the same as most preferred [{}]",
                        writeLocationEndpoints.get(0), mostPreferredWriteEndpointHolder.v);
                } else {
                    logger.debug("shouldRefreshEndpoints: false, write endpoint [{}] is the same as most preferred [{}]",
                        writeLocationEndpoints.get(0), mostPreferredWriteEndpointHolder.v);
                }

                return shouldRefresh;
            } else {
                logger.debug("shouldRefreshEndpoints = true, most preferred location [{}] is not in available write locations",
                    mostPreferredLocation);
                return true;
            }
        } else {
            logger.debug("shouldRefreshEndpoints: false, mostPreferredLocation [{}] is empty", mostPreferredLocation);
            return shouldRefresh;
        }
    } else {
        // Endpoint discovery disabled -> never refresh.
        logger.debug("shouldRefreshEndpoints: false, endpoint discovery not enabled");
        return false;
    }
}
class LocationCache { private final static Logger logger = LoggerFactory.getLogger(LocationCache.class); private final boolean enableEndpointDiscovery; private final URI defaultEndpoint; private final boolean useMultipleWriteLocations; private final Object lockObject; private final Duration unavailableLocationsExpirationTime; private final ConcurrentHashMap<URI, LocationUnavailabilityInfo> locationUnavailabilityInfoByEndpoint; private final ConnectionPolicy connectionPolicy; private DatabaseAccountLocationsInfo locationInfo; private Instant lastCacheUpdateTimestamp; private boolean enableMultipleWriteLocations; public LocationCache( ConnectionPolicy connectionPolicy, URI defaultEndpoint, Configs configs) { List<String> preferredLocations = new ArrayList<>(connectionPolicy.getPreferredRegions() != null ? connectionPolicy.getPreferredRegions() : Collections.emptyList() ); this.locationInfo = new DatabaseAccountLocationsInfo(preferredLocations, defaultEndpoint); this.defaultEndpoint = defaultEndpoint; this.enableEndpointDiscovery = connectionPolicy.isEndpointDiscoveryEnabled(); this.useMultipleWriteLocations = connectionPolicy.isMultipleWriteRegionsEnabled(); this.lockObject = new Object(); this.locationUnavailabilityInfoByEndpoint = new ConcurrentHashMap<>(); this.lastCacheUpdateTimestamp = Instant.MIN; this.enableMultipleWriteLocations = false; this.unavailableLocationsExpirationTime = Duration.ofSeconds(configs.getUnavailableLocationsExpirationTimeInSeconds()); this.connectionPolicy = connectionPolicy; } /** * Gets list of read endpoints ordered by * * 1. Preferred location * 2. Endpoint availability * @return */ public UnmodifiableList<URI> getReadEndpoints() { if (this.locationUnavailabilityInfoByEndpoint.size() > 0 && unavailableLocationsExpirationTimePassed()) { this.updateLocationCache(); } return this.locationInfo.readEndpoints; } /** * Gets list of write endpoints ordered by * 1. Preferred location * 2. 
Endpoint availability * @return */ public UnmodifiableList<URI> getWriteEndpoints() { if (this.locationUnavailabilityInfoByEndpoint.size() > 0 && unavailableLocationsExpirationTimePassed()) { this.updateLocationCache(); } return this.locationInfo.writeEndpoints; } /*** * Get the list of available read endpoints. * The list will not be filtered by preferred region list. * * This method is ONLY used for fault injection. * @return */ public List<URI> getAvailableReadEndpoints() { return this.locationInfo.availableReadEndpointByLocation.values().stream().collect(Collectors.toList()); } /*** * Get the list of available write endpoints. * The list will not be filtered by preferred region list. * * This method is ONLY used for fault injection. * @return */ public List<URI> getAvailableWriteEndpoints() { return this.locationInfo.availableWriteEndpointByLocation.values().stream().collect(Collectors.toList()); } /** * Marks the current location unavailable for read */ public void markEndpointUnavailableForRead(URI endpoint) { this.markEndpointUnavailable(endpoint, OperationType.Read); } /** * Marks the current location unavailable for write */ public void markEndpointUnavailableForWrite(URI endpoint) { this.markEndpointUnavailable(endpoint, OperationType.Write); } /** * Invoked when {@link DatabaseAccount} is read * @param databaseAccount READ DatabaseAccount */ public void onDatabaseAccountRead(DatabaseAccount databaseAccount) { this.updateLocationCache( databaseAccount.getWritableLocations(), databaseAccount.getReadableLocations(), null, BridgeInternal.isEnableMultipleWriteLocations(databaseAccount)); } void onLocationPreferenceChanged(UnmodifiableList<String> preferredLocations) { this.updateLocationCache( null, null , preferredLocations, null); } /** * Resolves request to service endpoint. * 1. If this is a write request * (a) If UseMultipleWriteLocations = true * (i) For document writes, resolve to most preferred and available write endpoint. 
* Once the endpoint is marked unavailable, it is moved to the end of available write endpoint. Current request will * be retried on next preferred available write endpoint. * (ii) For all other resources, always resolve to first/second (regardless of preferred locations) * write getEndpoint in {@link DatabaseAccount * Endpoint of first write location in {@link DatabaseAccount * write operation on all resource types (except during that region's failover). * Only during manual failover, client would retry write on second write location in {@link DatabaseAccount * (b) Else resolve the request to first write getEndpoint in {@link DatabaseAccount * second write getEndpoint in {@link DatabaseAccount * 2. Else resolve the request to most preferred available read getEndpoint (getAutomatic failover for read requests) * @param request Request for which getEndpoint is to be resolved * @return Resolved getEndpoint */ public URI resolveServiceEndpoint(RxDocumentServiceRequest request) { Objects.requireNonNull(request.requestContext, "RxDocumentServiceRequest.requestContext is required and cannot be null."); if(request.requestContext.locationEndpointToRoute != null) { return request.requestContext.locationEndpointToRoute; } int locationIndex = Utils.getValueOrDefault(request.requestContext.locationIndexToRoute, 0); boolean usePreferredLocations = request.requestContext.usePreferredLocations != null ? 
request.requestContext.usePreferredLocations : true; if(!usePreferredLocations || (request.getOperationType().isWriteOperation() && !this.canUseMultipleWriteLocations(request))) { DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo; if(this.enableEndpointDiscovery && currentLocationInfo.availableWriteLocations.size() > 0) { locationIndex = Math.min(locationIndex%2, currentLocationInfo.availableWriteLocations.size()-1); String writeLocation = currentLocationInfo.availableWriteLocations.get(locationIndex); return currentLocationInfo.availableWriteEndpointByLocation.get(writeLocation); } else { return this.defaultEndpoint; } } else { UnmodifiableList<URI> endpoints = request.getOperationType().isWriteOperation()? this.getApplicableWriteEndpoints(request) : this.getApplicableReadEndpoints(request); return endpoints.get(locationIndex % endpoints.size()); } } public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) { return this.getApplicableWriteEndpoints(request.requestContext.getExcludeRegions(), request.requestContext.getUnavailableRegionsForPartition()); } public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegionsOnRequest, List<String> unavailableRegionsForPartition) { UnmodifiableList<URI> writeEndpoints = this.getWriteEndpoints(); Supplier<CosmosExcludedRegions> excludedRegionsSupplier = this.connectionPolicy.getExcludedRegionsSupplier(); List<String> effectiveExcludedRegions = isExcludedRegionsSupplierConfigured(excludedRegionsSupplier) ? 
new ArrayList<>(excludedRegionsSupplier.get().getExcludedRegions()) : Collections.emptyList(); if (!isExcludeRegionsConfigured(excludedRegionsOnRequest, effectiveExcludedRegions) && (unavailableRegionsForPartition == null || unavailableRegionsForPartition.isEmpty())) { return writeEndpoints; } if (excludedRegionsOnRequest != null && !excludedRegionsOnRequest.isEmpty()) { effectiveExcludedRegions = excludedRegionsOnRequest; } List<String> effectiveExcludedRegionsWithPartitionUnavailableRegions = new ArrayList<>(effectiveExcludedRegions); if (unavailableRegionsForPartition != null) { effectiveExcludedRegionsWithPartitionUnavailableRegions.addAll(unavailableRegionsForPartition); } return this.getApplicableEndpoints( writeEndpoints, this.locationInfo.regionNameByWriteEndpoint, this.defaultEndpoint, effectiveExcludedRegionsWithPartitionUnavailableRegions); } public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) { return this.getApplicableReadEndpoints(request.requestContext.getExcludeRegions(), request.requestContext.getUnavailableRegionsForPartition()); } public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegionsOnRequest, List<String> unavailableRegionsForPartition) { UnmodifiableList<URI> readEndpoints = this.getReadEndpoints(); Supplier<CosmosExcludedRegions> excludedRegionsSupplier = this.connectionPolicy.getExcludedRegionsSupplier(); List<String> effectiveExcludedRegions = isExcludedRegionsSupplierConfigured(excludedRegionsSupplier) ? 
new ArrayList<>(excludedRegionsSupplier.get().getExcludedRegions()) : Collections.emptyList(); if (!isExcludeRegionsConfigured(excludedRegionsOnRequest, effectiveExcludedRegions) && (unavailableRegionsForPartition == null || unavailableRegionsForPartition.isEmpty())) { return readEndpoints; } if (excludedRegionsOnRequest != null && !excludedRegionsOnRequest.isEmpty()) { effectiveExcludedRegions = excludedRegionsOnRequest; } List<String> effectiveExcludedRegionsWithPartitionUnavailableRegions = new ArrayList<>(effectiveExcludedRegions); if (unavailableRegionsForPartition != null) { effectiveExcludedRegionsWithPartitionUnavailableRegions.addAll(unavailableRegionsForPartition); } return this.getApplicableEndpoints( readEndpoints, this.locationInfo.regionNameByReadEndpoint, this.locationInfo.writeEndpoints.get(0), effectiveExcludedRegionsWithPartitionUnavailableRegions); } private UnmodifiableList<URI> getApplicableEndpoints( UnmodifiableList<URI> endpoints, UnmodifiableMap<URI, String> regionNameByEndpoint, URI fallbackEndpoint, List<String> excludeRegionList) { List<URI> applicableEndpoints = new ArrayList<>(); for (URI endpoint : endpoints) { Utils.ValueHolder<String> regionName = new Utils.ValueHolder<>(); if (Utils.tryGetValue(regionNameByEndpoint, endpoint, regionName)) { if (!excludeRegionList.stream().anyMatch(regionName.v::equalsIgnoreCase)) { applicableEndpoints.add(endpoint); } } } if (applicableEndpoints.isEmpty()) { applicableEndpoints.add(fallbackEndpoint); } return new UnmodifiableList<>(applicableEndpoints); } private boolean isExcludeRegionsConfigured(List<String> excludedRegionsOnRequest, List<String> excludedRegionsOnClient) { boolean isExcludedRegionsConfiguredOnRequest = !(excludedRegionsOnRequest == null || excludedRegionsOnRequest.isEmpty()); boolean isExcludedRegionsConfiguredOnClient = !(excludedRegionsOnClient == null || excludedRegionsOnClient.isEmpty()); return isExcludedRegionsConfiguredOnRequest || isExcludedRegionsConfiguredOnClient; } 
public URI resolveFaultInjectionEndpoint(String region, boolean writeOnly) { Utils.ValueHolder<URI> endpointValueHolder = new Utils.ValueHolder<>(); if (writeOnly) { Utils.tryGetValue(this.locationInfo.availableWriteEndpointByLocation, region, endpointValueHolder); } else { Utils.tryGetValue(this.locationInfo.availableReadEndpointByLocation, region, endpointValueHolder); } if (endpointValueHolder.v != null) { return endpointValueHolder.v; } throw new IllegalArgumentException("Can not find service endpoint for region " + region); } public URI getDefaultEndpoint() { return this.defaultEndpoint; } public String getRegionName(URI locationEndpoint, com.azure.cosmos.implementation.OperationType operationType) { Utils.ValueHolder<String> regionName = new Utils.ValueHolder<>(); if (operationType.isWriteOperation()) { if (Utils.tryGetValue(this.locationInfo.regionNameByWriteEndpoint, locationEndpoint, regionName)) { return regionName.v; } } else { if (Utils.tryGetValue(this.locationInfo.regionNameByReadEndpoint, locationEndpoint, regionName)) { return regionName.v; } } return this.locationInfo.availableWriteLocations.get(0).toLowerCase(Locale.ROOT); } private boolean areEqual(URI url1, URI url2) { return url1.equals(url2); } private void clearStaleEndpointUnavailabilityInfo() { if (!this.locationUnavailabilityInfoByEndpoint.isEmpty()) { List<URI> unavailableEndpoints = new ArrayList<>(this.locationUnavailabilityInfoByEndpoint.keySet()); for (URI unavailableEndpoint: unavailableEndpoints) { Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>(); Utils.ValueHolder<LocationUnavailabilityInfo> removedHolder = new Utils.ValueHolder<>(); if (Utils.tryGetValue(this.locationUnavailabilityInfoByEndpoint, unavailableEndpoint, unavailabilityInfoHolder) && durationPassed(Instant.now(), unavailabilityInfoHolder.v.lastUnavailabilityCheckTimeStamp, this.unavailableLocationsExpirationTime) && 
Utils.tryRemove(this.locationUnavailabilityInfoByEndpoint, unavailableEndpoint, removedHolder)) { logger.debug( "Removed endpoint [{}] unavailable for operations [{}] from unavailableEndpoints", unavailableEndpoint, unavailabilityInfoHolder.v.unavailableOperations); } } } } private boolean isEndpointUnavailable(URI endpoint, OperationType expectedAvailableOperations) { Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>(); if (expectedAvailableOperations == OperationType.None || !Utils.tryGetValue(this.locationUnavailabilityInfoByEndpoint, endpoint, unavailabilityInfoHolder) || !unavailabilityInfoHolder.v.unavailableOperations.supports(expectedAvailableOperations)) { return false; } else { if (durationPassed(Instant.now(), unavailabilityInfoHolder.v.lastUnavailabilityCheckTimeStamp, this.unavailableLocationsExpirationTime)) { return false; } else { logger.debug( "Endpoint [{}] unavailable for operations [{}] present in unavailableEndpoints", endpoint, unavailabilityInfoHolder.v.unavailableOperations); return true; } } } private boolean anyEndpointsAvailable(List<URI> endpoints, OperationType expectedAvailableOperations) { Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>(); boolean anyEndpointsAvailable = false; for (URI endpoint : endpoints) { if (!isEndpointUnavailable(endpoint, expectedAvailableOperations)) { anyEndpointsAvailable = true; break; } } return anyEndpointsAvailable; } private void markEndpointUnavailable( URI unavailableEndpoint, OperationType unavailableOperationType) { Instant currentTime = Instant.now(); LocationUnavailabilityInfo updatedInfo = this.locationUnavailabilityInfoByEndpoint.compute( unavailableEndpoint, new BiFunction<URI, LocationUnavailabilityInfo, LocationUnavailabilityInfo>() { @Override public LocationUnavailabilityInfo apply(URI url, LocationUnavailabilityInfo info) { if (info == null) { return new LocationUnavailabilityInfo(currentTime, 
unavailableOperationType); } else { info.lastUnavailabilityCheckTimeStamp = currentTime; info.unavailableOperations = OperationType.combine(info.unavailableOperations, unavailableOperationType); return info; } } }); this.updateLocationCache(); logger.debug( "Endpoint [{}] unavailable for [{}] added/updated to unavailableEndpoints with timestamp [{}]", unavailableEndpoint, unavailableOperationType, updatedInfo.lastUnavailabilityCheckTimeStamp); } private void updateLocationCache(){ updateLocationCache(null, null, null, null); } private void updateLocationCache( Iterable<DatabaseAccountLocation> writeLocations, Iterable<DatabaseAccountLocation> readLocations, UnmodifiableList<String> preferenceList, Boolean enableMultipleWriteLocations) { synchronized (this.lockObject) { DatabaseAccountLocationsInfo nextLocationInfo = new DatabaseAccountLocationsInfo(this.locationInfo); logger.debug("updating location cache ..., current readLocations [{}], current writeLocations [{}]", nextLocationInfo.readEndpoints, nextLocationInfo.writeEndpoints); if (preferenceList != null) { nextLocationInfo.preferredLocations = preferenceList; } if (enableMultipleWriteLocations != null) { this.enableMultipleWriteLocations = enableMultipleWriteLocations; } this.clearStaleEndpointUnavailabilityInfo(); if (readLocations != null) { Utils.ValueHolder<UnmodifiableList<String>> out = Utils.ValueHolder.initialize(nextLocationInfo.availableReadLocations); Utils.ValueHolder<UnmodifiableMap<URI, String>> outReadRegionMap = Utils.ValueHolder.initialize(nextLocationInfo.regionNameByReadEndpoint); nextLocationInfo.availableReadEndpointByLocation = this.getEndpointByLocation(readLocations, out, outReadRegionMap); nextLocationInfo.availableReadLocations = out.v; nextLocationInfo.regionNameByReadEndpoint = outReadRegionMap.v; } if (writeLocations != null) { Utils.ValueHolder<UnmodifiableList<String>> out = Utils.ValueHolder.initialize(nextLocationInfo.availableWriteLocations); 
Utils.ValueHolder<UnmodifiableMap<URI, String>> outWriteRegionMap = Utils.ValueHolder.initialize(nextLocationInfo.regionNameByWriteEndpoint); nextLocationInfo.availableWriteEndpointByLocation = this.getEndpointByLocation(writeLocations, out, outWriteRegionMap); nextLocationInfo.availableWriteLocations = out.v; nextLocationInfo.regionNameByWriteEndpoint = outWriteRegionMap.v; } nextLocationInfo.writeEndpoints = this.getPreferredAvailableEndpoints(nextLocationInfo.availableWriteEndpointByLocation, nextLocationInfo.availableWriteLocations, OperationType.Write, this.defaultEndpoint); nextLocationInfo.readEndpoints = this.getPreferredAvailableEndpoints(nextLocationInfo.availableReadEndpointByLocation, nextLocationInfo.availableReadLocations, OperationType.Read, nextLocationInfo.writeEndpoints.get(0)); this.lastCacheUpdateTimestamp = Instant.now(); logger.debug("updating location cache finished, new readLocations [{}], new writeLocations [{}]", nextLocationInfo.readEndpoints, nextLocationInfo.writeEndpoints); this.locationInfo = nextLocationInfo; } } private UnmodifiableList<URI> getPreferredAvailableEndpoints(UnmodifiableMap<String, URI> endpointsByLocation, UnmodifiableList<String> orderedLocations, OperationType expectedAvailableOperation, URI fallbackEndpoint) { List<URI> endpoints = new ArrayList<>(); DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo; if (this.enableEndpointDiscovery) { if (this.canUseMultipleWriteLocations() || expectedAvailableOperation.supports(OperationType.Read)) { List<URI> unavailableEndpoints = new ArrayList<>(); if (currentLocationInfo.preferredLocations != null && !currentLocationInfo.preferredLocations.isEmpty()) { for (String location: currentLocationInfo.preferredLocations) { Utils.ValueHolder<URI> endpoint = new Utils.ValueHolder<>(); if (Utils.tryGetValue(endpointsByLocation, location, endpoint)) { if (this.isEndpointUnavailable(endpoint.v, expectedAvailableOperation)) { unavailableEndpoints.add(endpoint.v); } else 
{ endpoints.add(endpoint.v); } } } } else { for (String location : orderedLocations) { Utils.ValueHolder<URI> endpoint = Utils.ValueHolder.initialize(null); if (Utils.tryGetValue(endpointsByLocation, location, endpoint)) { if (this.defaultEndpoint.equals(endpoint.v)) { endpoints = new ArrayList<>(); break; } if (this.isEndpointUnavailable(endpoint.v, expectedAvailableOperation)) { unavailableEndpoints.add(endpoint.v); } else { endpoints.add(endpoint.v); } } } } if (endpoints.isEmpty()) { endpoints.add(fallbackEndpoint); } endpoints.addAll(unavailableEndpoints); } else { for (String location : orderedLocations) { Utils.ValueHolder<URI> endpoint = Utils.ValueHolder.initialize(null); if (!Strings.isNullOrEmpty(location) && Utils.tryGetValue(endpointsByLocation, location, endpoint)) { endpoints.add(endpoint.v); } } } } if (endpoints.isEmpty()) { endpoints.add(fallbackEndpoint); } return new UnmodifiableList<URI>(endpoints); } private UnmodifiableMap<String, URI> getEndpointByLocation(Iterable<DatabaseAccountLocation> locations, Utils.ValueHolder<UnmodifiableList<String>> orderedLocations, Utils.ValueHolder<UnmodifiableMap<URI, String>> regionMap) { Map<String, URI> endpointsByLocation = new CaseInsensitiveMap<>(); Map<URI, String> regionByEndpoint = new CaseInsensitiveMap<>(); List<String> parsedLocations = new ArrayList<>(); for (DatabaseAccountLocation location: locations) { if (!Strings.isNullOrEmpty(location.getName())) { try { URI endpoint = new URI(location.getEndpoint().toLowerCase(Locale.ROOT)); endpointsByLocation.put(location.getName().toLowerCase(Locale.ROOT), endpoint); regionByEndpoint.put(endpoint, location.getName().toLowerCase(Locale.ROOT)); parsedLocations.add(location.getName()); } catch (Exception e) { logger.warn("GetAvailableEndpointsByLocation() - skipping add for location = [{}] as it is location name is either empty or endpoint is malformed [{}]", location.getName(), location.getEndpoint()); } } } orderedLocations.v = new 
UnmodifiableList<String>(parsedLocations); regionMap.v = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(regionByEndpoint); return (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(endpointsByLocation); } public boolean canUseMultipleWriteLocations() { return this.useMultipleWriteLocations && this.enableMultipleWriteLocations; } public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) { return this.canUseMultipleWriteLocations() && (request.getResourceType() == ResourceType.Document || (request.getResourceType() == ResourceType.StoredProcedure && request.getOperationType() == com.azure.cosmos.implementation.OperationType.ExecuteJavaScript)); } private static class LocationUnavailabilityInfo { LocationUnavailabilityInfo(Instant instant, OperationType type) { this.lastUnavailabilityCheckTimeStamp = instant; this.unavailableOperations = type; } public Instant lastUnavailabilityCheckTimeStamp; public OperationType unavailableOperations; } private enum OperationType { None(0x0), Read(0x1), Write(0x2), ReadAndWrite(0x3); private final int flag; public boolean hasReadFlag() { return (flag & Read.flag) != 0; } public boolean hasWriteFlag() { return (flag & Write.flag) != 0; } public static OperationType combine(OperationType t1, OperationType t2) { switch (t1.flag | t2.flag) { case 0x0: return None; case 0x1: return Read; case 0x2: return Write; default: return ReadAndWrite; } } public boolean supports(OperationType type) { return (flag & type.flag) != 0; } OperationType(int flag) { this.flag = flag; } } private boolean durationPassed(Instant end, Instant start, Duration duration) { return end.minus(duration).isAfter(start); } private boolean unavailableLocationsExpirationTimePassed() { return durationPassed(Instant.now(), this.lastCacheUpdateTimestamp, this.unavailableLocationsExpirationTime); } private static boolean isExcludedRegionsSupplierConfigured(Supplier<CosmosExcludedRegions> 
excludedRegionsSupplier) { return excludedRegionsSupplier != null && excludedRegionsSupplier.get() != null; } static class DatabaseAccountLocationsInfo { private UnmodifiableList<String> preferredLocations; private UnmodifiableList<String> availableWriteLocations; private UnmodifiableList<String> availableReadLocations; private UnmodifiableMap<String, URI> availableWriteEndpointByLocation; private UnmodifiableMap<String, URI> availableReadEndpointByLocation; private UnmodifiableMap<URI, String> regionNameByWriteEndpoint; private UnmodifiableMap<URI, String> regionNameByReadEndpoint; private UnmodifiableList<URI> writeEndpoints; private UnmodifiableList<URI> readEndpoints; public DatabaseAccountLocationsInfo(List<String> preferredLocations, URI defaultEndpoint) { this.preferredLocations = new UnmodifiableList<>(preferredLocations.stream().map(loc -> loc.toLowerCase(Locale.ROOT)).collect(Collectors.toList())); this.availableWriteEndpointByLocation = (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(new CaseInsensitiveMap<>()); this.availableReadEndpointByLocation = (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(new CaseInsensitiveMap<>()); this.regionNameByWriteEndpoint = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(new CaseInsensitiveMap<>()); this.regionNameByReadEndpoint = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(new CaseInsensitiveMap<>()); this.availableReadLocations = new UnmodifiableList<>(Collections.emptyList()); this.availableWriteLocations = new UnmodifiableList<>(Collections.emptyList()); this.readEndpoints = new UnmodifiableList<>(Collections.singletonList(defaultEndpoint)); this.writeEndpoints = new UnmodifiableList<>(Collections.singletonList(defaultEndpoint)); } public DatabaseAccountLocationsInfo(DatabaseAccountLocationsInfo other) { this.preferredLocations = other.preferredLocations; this.availableWriteLocations = 
other.availableWriteLocations; this.availableReadLocations = other.availableReadLocations; this.availableWriteEndpointByLocation = other.availableWriteEndpointByLocation; this.regionNameByWriteEndpoint = other.regionNameByWriteEndpoint; this.regionNameByReadEndpoint = other.regionNameByReadEndpoint; this.availableReadEndpointByLocation = other.availableReadEndpointByLocation; this.writeEndpoints = other.writeEndpoints; this.readEndpoints = other.readEndpoints; } } }
/**
 * Caches the per-account read/write service endpoints and routes requests to the most
 * preferred available region. Endpoint lists are refreshed from {@link DatabaseAccount}
 * reads and re-ordered when endpoints are marked unavailable; unavailability records
 * expire after {@code unavailableLocationsExpirationTime}.
 *
 * Thread-safety: mutation happens under {@code lockObject}; readers see an immutable
 * {@code DatabaseAccountLocationsInfo} snapshot that is swapped atomically.
 */
class LocationCache {
    private final static Logger logger = LoggerFactory.getLogger(LocationCache.class);

    private final boolean enableEndpointDiscovery;
    // Global/account endpoint used as the fallback when no regional endpoint applies.
    private final URI defaultEndpoint;
    private final boolean useMultipleWriteLocations;
    // Guards updateLocationCache(); all cache rebuilds are serialized on this monitor.
    private final Object lockObject;
    private final Duration unavailableLocationsExpirationTime;
    // Endpoints currently considered unavailable, with the operation type(s) affected.
    private final ConcurrentHashMap<URI, LocationUnavailabilityInfo> locationUnavailabilityInfoByEndpoint;
    private final ConnectionPolicy connectionPolicy;

    // Copy-on-write snapshot of all location/endpoint lookup state.
    private DatabaseAccountLocationsInfo locationInfo;

    private Instant lastCacheUpdateTimestamp;
    // Set from the service response (account-level flag), distinct from the client-side
    // useMultipleWriteLocations preference; both must be true to do multi-region writes.
    private boolean enableMultipleWriteLocations;

    public LocationCache(
            ConnectionPolicy connectionPolicy,
            URI defaultEndpoint,
            Configs configs) {
        List<String> preferredLocations = new ArrayList<>(connectionPolicy.getPreferredRegions() != null ?
                connectionPolicy.getPreferredRegions() :
                Collections.emptyList()
        );

        this.locationInfo = new DatabaseAccountLocationsInfo(preferredLocations, defaultEndpoint);
        this.defaultEndpoint = defaultEndpoint;
        this.enableEndpointDiscovery = connectionPolicy.isEndpointDiscoveryEnabled();
        this.useMultipleWriteLocations = connectionPolicy.isMultipleWriteRegionsEnabled();

        this.lockObject = new Object();

        this.locationUnavailabilityInfoByEndpoint = new ConcurrentHashMap<>();
        this.lastCacheUpdateTimestamp = Instant.MIN;
        this.enableMultipleWriteLocations = false;
        this.unavailableLocationsExpirationTime =
            Duration.ofSeconds(configs.getUnavailableLocationsExpirationTimeInSeconds());
        this.connectionPolicy = connectionPolicy;
    }

    /**
     * Gets the list of read endpoints ordered by:
     * 1. Preferred location
     * 2. Endpoint availability
     *
     * Triggers a cache rebuild first if any unavailability record may have expired.
     *
     * @return ordered read endpoints
     */
    public UnmodifiableList<URI> getReadEndpoints() {
        if (this.locationUnavailabilityInfoByEndpoint.size() > 0
                && unavailableLocationsExpirationTimePassed()) {
            this.updateLocationCache();
        }

        return this.locationInfo.readEndpoints;
    }

    /**
     * Gets the list of write endpoints ordered by:
     * 1. Preferred location
     * 2. Endpoint availability
     *
     * Triggers a cache rebuild first if any unavailability record may have expired.
     *
     * @return ordered write endpoints
     */
    public UnmodifiableList<URI> getWriteEndpoints() {
        if (this.locationUnavailabilityInfoByEndpoint.size() > 0
                && unavailableLocationsExpirationTimePassed()) {
            this.updateLocationCache();
        }

        return this.locationInfo.writeEndpoints;
    }

    /***
     * Get the list of available read endpoints.
     * The list will not be filtered by preferred region list.
     *
     * This method is ONLY used for fault injection.
     * @return all known read endpoints
     */
    public List<URI> getAvailableReadEndpoints() {
        return this.locationInfo.availableReadEndpointByLocation.values().stream().collect(Collectors.toList());
    }

    /***
     * Get the list of available write endpoints.
     * The list will not be filtered by preferred region list.
     *
     * This method is ONLY used for fault injection.
     * @return all known write endpoints
     */
    public List<URI> getAvailableWriteEndpoints() {
        return this.locationInfo.availableWriteEndpointByLocation.values().stream().collect(Collectors.toList());
    }

    /**
     * Returns the preferred-region list derived by the cache when the client configured
     * none (populated in updateLocationCache only when the default endpoint is not itself
     * a known regional read endpoint).
     */
    public List<String> getEffectivePreferredLocations() {
        return this.locationInfo.effectivePreferredLocations;
    }

    /**
     * Marks the current location unavailable for read
     */
    public void markEndpointUnavailableForRead(URI endpoint) {
        this.markEndpointUnavailable(endpoint, OperationType.Read);
    }

    /**
     * Marks the current location unavailable for write
     */
    public void markEndpointUnavailableForWrite(URI endpoint) {
        this.markEndpointUnavailable(endpoint, OperationType.Write);
    }

    /**
     * Invoked when {@link DatabaseAccount} is read
     * @param databaseAccount READ DatabaseAccount
     */
    public void onDatabaseAccountRead(DatabaseAccount databaseAccount) {
        this.updateLocationCache(
                databaseAccount.getWritableLocations(),
                databaseAccount.getReadableLocations(),
                null,
                BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
    }

    // Rebuilds the cache with a new client-side preferred-region list; account-level
    // location lists are left as-is.
    void onLocationPreferenceChanged(UnmodifiableList<String> preferredLocations) {
        this.updateLocationCache(
                null, null, preferredLocations, null);
    }

    /**
     * Resolves a request to a service endpoint.
     *
     * 1. If this is a write request:
     *    (a) If multiple write locations can be used:
     *        (i) for document writes, resolve to the most preferred available write
     *            endpoint; once an endpoint is marked unavailable it moves to the end of
     *            the list so retries hit the next preferred available write endpoint;
     *        (ii) for all other resources, always resolve to the first/second write
     *             endpoint of the account (regardless of preferred locations).
     *    (b) Else resolve to the first write endpoint (second only during manual failover).
     * 2. Else (reads) resolve to the most preferred available read endpoint (automatic
     *    failover for read requests).
     *
     * An endpoint pinned on the request context ({@code locationEndpointToRoute}) always
     * wins.
     *
     * @param request request for which the endpoint is to be resolved
     * @return resolved endpoint
     */
    public URI resolveServiceEndpoint(RxDocumentServiceRequest request) {
        Objects.requireNonNull(request.requestContext, "RxDocumentServiceRequest.requestContext is required and cannot be null.");
        if (request.requestContext.locationEndpointToRoute != null) {
            return request.requestContext.locationEndpointToRoute;
        }

        int locationIndex = Utils.getValueOrDefault(request.requestContext.locationIndexToRoute, 0);

        boolean usePreferredLocations = request.requestContext.usePreferredLocations != null
            ? request.requestContext.usePreferredLocations
            : true;

        if (!usePreferredLocations
                || (request.getOperationType().isWriteOperation() && !this.canUseMultipleWriteLocations(request))) {
            // Single-write-region path: only the first or second account write location
            // is ever used (locationIndex % 2), clamped to the available list size.
            DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo;
            if (this.enableEndpointDiscovery && currentLocationInfo.availableWriteLocations.size() > 0) {
                locationIndex = Math.min(locationIndex % 2, currentLocationInfo.availableWriteLocations.size() - 1);
                String writeLocation = currentLocationInfo.availableWriteLocations.get(locationIndex);
                return currentLocationInfo.availableWriteEndpointByLocation.get(writeLocation);
            } else {
                return this.defaultEndpoint;
            }
        } else {
            UnmodifiableList<URI> endpoints = request.getOperationType().isWriteOperation()
                ? this.getApplicableWriteEndpoints(request)
                : this.getApplicableReadEndpoints(request);
            // Wrap around so retries walk the preference-ordered endpoint list.
            return endpoints.get(locationIndex % endpoints.size());
        }
    }

    public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) {
        return this.getApplicableWriteEndpoints(
            request.requestContext.getExcludeRegions(),
            request.requestContext.getUnavailableRegionsForPartition());
    }

    /**
     * Write endpoints minus regions excluded on the request / client and minus regions
     * currently unavailable for the partition. Falls back to the default endpoint when
     * everything is filtered out.
     *
     * @param excludedRegionsOnRequest request-level excluded regions (override the
     *        client-level supplier when non-empty)
     * @param unavailableRegionsForPartition per-partition unavailable regions
     */
    public UnmodifiableList<URI> getApplicableWriteEndpoints(
            List<String> excludedRegionsOnRequest, List<String> unavailableRegionsForPartition) {

        UnmodifiableList<URI> writeEndpoints = this.getWriteEndpoints();

        Supplier<CosmosExcludedRegions> excludedRegionsSupplier = this.connectionPolicy.getExcludedRegionsSupplier();

        List<String> effectiveExcludedRegions = isExcludedRegionsSupplierConfigured(excludedRegionsSupplier)
            ? new ArrayList<>(excludedRegionsSupplier.get().getExcludedRegions())
            : Collections.emptyList();

        // Fast path: nothing to filter.
        if (!isExcludeRegionsConfigured(excludedRegionsOnRequest, effectiveExcludedRegions)
                && (unavailableRegionsForPartition == null || unavailableRegionsForPartition.isEmpty())) {
            return writeEndpoints;
        }

        // Request-level exclusions replace (not merge with) the client-level ones.
        if (excludedRegionsOnRequest != null && !excludedRegionsOnRequest.isEmpty()) {
            effectiveExcludedRegions = excludedRegionsOnRequest;
        }

        List<String> effectiveExcludedRegionsWithPartitionUnavailableRegions = new ArrayList<>(effectiveExcludedRegions);

        if (unavailableRegionsForPartition != null) {
            effectiveExcludedRegionsWithPartitionUnavailableRegions.addAll(unavailableRegionsForPartition);
        }

        return this.getApplicableEndpoints(
            writeEndpoints,
            this.locationInfo.regionNameByWriteEndpoint,
            this.defaultEndpoint,
            effectiveExcludedRegionsWithPartitionUnavailableRegions);
    }

    public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) {
        return this.getApplicableReadEndpoints(
            request.requestContext.getExcludeRegions(),
            request.requestContext.getUnavailableRegionsForPartition());
    }

    /**
     * Read endpoints minus excluded / partition-unavailable regions; falls back to the
     * first write endpoint (not the default endpoint) when everything is filtered out.
     */
    public UnmodifiableList<URI> getApplicableReadEndpoints(
            List<String> excludedRegionsOnRequest, List<String> unavailableRegionsForPartition) {

        UnmodifiableList<URI> readEndpoints = this.getReadEndpoints();

        Supplier<CosmosExcludedRegions> excludedRegionsSupplier = this.connectionPolicy.getExcludedRegionsSupplier();

        List<String> effectiveExcludedRegions = isExcludedRegionsSupplierConfigured(excludedRegionsSupplier)
            ? new ArrayList<>(excludedRegionsSupplier.get().getExcludedRegions())
            : Collections.emptyList();

        if (!isExcludeRegionsConfigured(excludedRegionsOnRequest, effectiveExcludedRegions)
                && (unavailableRegionsForPartition == null || unavailableRegionsForPartition.isEmpty())) {
            return readEndpoints;
        }

        if (excludedRegionsOnRequest != null && !excludedRegionsOnRequest.isEmpty()) {
            effectiveExcludedRegions = excludedRegionsOnRequest;
        }

        List<String> effectiveExcludedRegionsWithPartitionUnavailableRegions = new ArrayList<>(effectiveExcludedRegions);

        if (unavailableRegionsForPartition != null) {
            effectiveExcludedRegionsWithPartitionUnavailableRegions.addAll(unavailableRegionsForPartition);
        }

        return this.getApplicableEndpoints(
            readEndpoints,
            this.locationInfo.regionNameByReadEndpoint,
            this.locationInfo.writeEndpoints.get(0),
            effectiveExcludedRegionsWithPartitionUnavailableRegions);
    }

    // Keeps only endpoints whose region is not in excludeRegionList (case-insensitive);
    // endpoints with no known region name are dropped. Guarantees a non-empty result by
    // substituting fallbackEndpoint.
    private UnmodifiableList<URI> getApplicableEndpoints(
            UnmodifiableList<URI> endpoints,
            UnmodifiableMap<URI, String> regionNameByEndpoint,
            URI fallbackEndpoint,
            List<String> excludeRegionList) {

        List<URI> applicableEndpoints = new ArrayList<>();

        for (URI endpoint : endpoints) {
            Utils.ValueHolder<String> regionName = new Utils.ValueHolder<>();
            if (Utils.tryGetValue(regionNameByEndpoint, endpoint, regionName)) {
                if (!excludeRegionList.stream().anyMatch(regionName.v::equalsIgnoreCase)) {
                    applicableEndpoints.add(endpoint);
                }
            }
        }

        if (applicableEndpoints.isEmpty()) {
            applicableEndpoints.add(fallbackEndpoint);
        }

        return new UnmodifiableList<>(applicableEndpoints);
    }

    // True when either the request or the client supplies a non-empty exclusion list.
    private boolean isExcludeRegionsConfigured(List<String> excludedRegionsOnRequest, List<String> excludedRegionsOnClient) {
        boolean isExcludedRegionsConfiguredOnRequest
            = !(excludedRegionsOnRequest == null || excludedRegionsOnRequest.isEmpty());

        boolean isExcludedRegionsConfiguredOnClient
            = !(excludedRegionsOnClient == null || excludedRegionsOnClient.isEmpty());

        return isExcludedRegionsConfiguredOnRequest || isExcludedRegionsConfiguredOnClient;
    }

    /**
     * Resolves the endpoint for a region by name; used by fault injection only.
     *
     * @param region region name (looked up case-insensitively)
     * @param writeOnly when true, resolve against write endpoints; otherwise read
     * @throws IllegalArgumentException when the region is unknown
     */
    public URI resolveFaultInjectionEndpoint(String region, boolean writeOnly) {
        Utils.ValueHolder<URI> endpointValueHolder = new Utils.ValueHolder<>();
        if (writeOnly) {
            Utils.tryGetValue(this.locationInfo.availableWriteEndpointByLocation, region, endpointValueHolder);
        } else {
            Utils.tryGetValue(this.locationInfo.availableReadEndpointByLocation, region, endpointValueHolder);
        }

        if (endpointValueHolder.v != null) {
            return endpointValueHolder.v;
        }

        throw new IllegalArgumentException("Can not find service endpoint for region " + region);
    }

    public URI getDefaultEndpoint() {
        return this.defaultEndpoint;
    }

    /**
     * Maps a location endpoint back to its region name, picking the write or read map
     * based on the operation type. Falls back to the first available write location.
     *
     * NOTE(review): the fallback assumes availableWriteLocations is non-empty — would
     * throw IndexOutOfBoundsException before the first account read; confirm callers
     * only invoke this after the cache is populated.
     */
    public String getRegionName(URI locationEndpoint, com.azure.cosmos.implementation.OperationType operationType) {
        Utils.ValueHolder<String> regionName = new Utils.ValueHolder<>();
        if (operationType.isWriteOperation()) {
            if (Utils.tryGetValue(this.locationInfo.regionNameByWriteEndpoint, locationEndpoint, regionName)) {
                return regionName.v;
            }
        } else {
            if (Utils.tryGetValue(this.locationInfo.regionNameByReadEndpoint, locationEndpoint, regionName)) {
                return regionName.v;
            }
        }

        return this.locationInfo.availableWriteLocations.get(0).toLowerCase(Locale.ROOT);
    }

    private boolean areEqual(URI url1, URI url2) {
        return url1.equals(url2);
    }

    // Drops unavailability records older than unavailableLocationsExpirationTime so the
    // corresponding endpoints become eligible again on the next rebuild.
    private void clearStaleEndpointUnavailabilityInfo() {
        if (!this.locationUnavailabilityInfoByEndpoint.isEmpty()) {
            List<URI> unavailableEndpoints = new ArrayList<>(this.locationUnavailabilityInfoByEndpoint.keySet());

            for (URI unavailableEndpoint : unavailableEndpoints) {
                Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>();
                Utils.ValueHolder<LocationUnavailabilityInfo> removedHolder = new Utils.ValueHolder<>();

                if (Utils.tryGetValue(this.locationUnavailabilityInfoByEndpoint, unavailableEndpoint, unavailabilityInfoHolder)
                        && durationPassed(Instant.now(), unavailabilityInfoHolder.v.lastUnavailabilityCheckTimeStamp, this.unavailableLocationsExpirationTime)
                        && Utils.tryRemove(this.locationUnavailabilityInfoByEndpoint, unavailableEndpoint, removedHolder)) {
                    logger.debug(
                        "Removed endpoint [{}] unavailable for operations [{}] from unavailableEndpoints",
                        unavailableEndpoint,
                        unavailabilityInfoHolder.v.unavailableOperations);
                }
            }
        }
    }

    // An endpoint counts as unavailable for the given operation only while its record is
    // fresh (within the expiration window) and covers that operation type.
    private boolean isEndpointUnavailable(URI endpoint, OperationType expectedAvailableOperations) {
        Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>();

        if (expectedAvailableOperations == OperationType.None
                || !Utils.tryGetValue(this.locationUnavailabilityInfoByEndpoint, endpoint, unavailabilityInfoHolder)
                || !unavailabilityInfoHolder.v.unavailableOperations.supports(expectedAvailableOperations)) {
            return false;
        } else {
            if (durationPassed(Instant.now(), unavailabilityInfoHolder.v.lastUnavailabilityCheckTimeStamp, this.unavailableLocationsExpirationTime)) {
                return false;
            } else {
                logger.debug(
                    "Endpoint [{}] unavailable for operations [{}] present in unavailableEndpoints",
                    endpoint,
                    unavailabilityInfoHolder.v.unavailableOperations);
                // Unexpired entry for this operation type => unavailable.
                return true;
            }
        }
    }

    // True when at least one endpoint in the list is not marked unavailable.
    private boolean anyEndpointsAvailable(List<URI> endpoints, OperationType expectedAvailableOperations) {
        Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>();
        boolean anyEndpointsAvailable = false;
        for (URI endpoint : endpoints) {
            if (!isEndpointUnavailable(endpoint, expectedAvailableOperations)) {
                anyEndpointsAvailable = true;
                break;
            }
        }
        return anyEndpointsAvailable;
    }

    // Records (or extends) an unavailability entry for the endpoint, then rebuilds the
    // cached endpoint ordering so the endpoint is demoted immediately.
    private void markEndpointUnavailable(
            URI unavailableEndpoint,
            OperationType unavailableOperationType) {
        Instant currentTime = Instant.now();
        LocationUnavailabilityInfo updatedInfo = this.locationUnavailabilityInfoByEndpoint.compute(
            unavailableEndpoint,
            new BiFunction<URI, LocationUnavailabilityInfo, LocationUnavailabilityInfo>() {
                @Override
                public LocationUnavailabilityInfo apply(URI url, LocationUnavailabilityInfo info) {
                    if (info == null) {
                        // No previous record: start a fresh one.
                        return new LocationUnavailabilityInfo(currentTime, unavailableOperationType);
                    } else {
                        // Existing record: refresh timestamp and OR-in the operation type.
                        info.lastUnavailabilityCheckTimeStamp = currentTime;
                        info.unavailableOperations = OperationType.combine(info.unavailableOperations, unavailableOperationType);
                        return info;
                    }
                }
            });

        this.updateLocationCache();

        logger.debug(
            "Endpoint [{}] unavailable for [{}] added/updated to unavailableEndpoints with timestamp [{}]",
            unavailableEndpoint,
            unavailableOperationType,
            updatedInfo.lastUnavailabilityCheckTimeStamp);
    }

    private void updateLocationCache() {
        updateLocationCache(null, null, null, null);
    }

    /**
     * Rebuilds the location snapshot under the lock. Any null parameter means "keep the
     * current value" for that dimension; the new snapshot is published atomically by
     * assigning {@code this.locationInfo} last.
     */
    private void updateLocationCache(
            Iterable<DatabaseAccountLocation> writeLocations,
            Iterable<DatabaseAccountLocation> readLocations,
            UnmodifiableList<String> preferenceList,
            Boolean enableMultipleWriteLocations) {
        synchronized (this.lockObject) {
            DatabaseAccountLocationsInfo nextLocationInfo = new DatabaseAccountLocationsInfo(this.locationInfo);
            logger.debug("updating location cache ..., current readLocations [{}], current writeLocations [{}]",
                nextLocationInfo.readEndpoints, nextLocationInfo.writeEndpoints);

            if (preferenceList != null) {
                nextLocationInfo.preferredLocations = preferenceList;
            }

            if (enableMultipleWriteLocations != null) {
                this.enableMultipleWriteLocations = enableMultipleWriteLocations;
            }

            this.clearStaleEndpointUnavailabilityInfo();

            if (readLocations != null) {
                Utils.ValueHolder<UnmodifiableList<String>> out = Utils.ValueHolder.initialize(nextLocationInfo.availableReadLocations);
                Utils.ValueHolder<UnmodifiableMap<URI, String>> outReadRegionMap = Utils.ValueHolder.initialize(nextLocationInfo.regionNameByReadEndpoint);
                nextLocationInfo.availableReadEndpointByLocation = this.getEndpointByLocation(readLocations, out, outReadRegionMap);
                nextLocationInfo.availableReadLocations = out.v;
                nextLocationInfo.regionNameByReadEndpoint = outReadRegionMap.v;
            }

            if (writeLocations != null) {
                Utils.ValueHolder<UnmodifiableList<String>> out = Utils.ValueHolder.initialize(nextLocationInfo.availableWriteLocations);
                Utils.ValueHolder<UnmodifiableMap<URI, String>> outWriteRegionMap = Utils.ValueHolder.initialize(nextLocationInfo.regionNameByWriteEndpoint);
                nextLocationInfo.availableWriteEndpointByLocation = this.getEndpointByLocation(writeLocations, out, outWriteRegionMap);
                nextLocationInfo.availableWriteLocations = out.v;
                nextLocationInfo.regionNameByWriteEndpoint = outWriteRegionMap.v;
            }

            // Write endpoints are computed first because the first write endpoint serves
            // as the fallback for the read endpoint list.
            nextLocationInfo.writeEndpoints = this.getPreferredAvailableEndpoints(nextLocationInfo.availableWriteEndpointByLocation, nextLocationInfo.availableWriteLocations, OperationType.Write, this.defaultEndpoint);
            nextLocationInfo.readEndpoints = this.getPreferredAvailableEndpoints(nextLocationInfo.availableReadEndpointByLocation, nextLocationInfo.availableReadLocations, OperationType.Read, nextLocationInfo.writeEndpoints.get(0));

            if (nextLocationInfo.preferredLocations == null || nextLocationInfo.preferredLocations.isEmpty()) {
                // No client-configured preferred regions: derive them from the account's
                // read regions — but only when the default endpoint is NOT itself a known
                // regional read endpoint (i.e. it is the global endpoint).
                Utils.ValueHolder<String> regionForDefaultEndpoint = new Utils.ValueHolder<>();

                if (!Utils.tryGetValue(nextLocationInfo.regionNameByReadEndpoint, this.defaultEndpoint, regionForDefaultEndpoint)) {
                    nextLocationInfo.effectivePreferredLocations = nextLocationInfo.availableReadLocations;
                }
            }

            this.lastCacheUpdateTimestamp = Instant.now();
            logger.debug("updating location cache finished, new readLocations [{}], new writeLocations [{}]",
                nextLocationInfo.readEndpoints, nextLocationInfo.writeEndpoints);
            this.locationInfo = nextLocationInfo;
        }
    }

    /**
     * Orders endpoints: available preferred endpoints first, then unavailable ones, with
     * {@code fallbackEndpoint} substituted when nothing is available. When no preferred
     * locations are configured, the account's own region order is used; hitting the
     * default endpoint in that walk resets the list (the default endpoint covers it).
     */
    private UnmodifiableList<URI> getPreferredAvailableEndpoints(UnmodifiableMap<String, URI> endpointsByLocation,
                                                                 UnmodifiableList<String> orderedLocations,
                                                                 OperationType expectedAvailableOperation,
                                                                 URI fallbackEndpoint) {
        List<URI> endpoints = new ArrayList<>();
        DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo;

        if (this.enableEndpointDiscovery) {
            if (this.canUseMultipleWriteLocations() || expectedAvailableOperation.supports(OperationType.Read)) {
                List<URI> unavailableEndpoints = new ArrayList<>();

                if (currentLocationInfo.preferredLocations != null && !currentLocationInfo.preferredLocations.isEmpty()) {
                    // Walk client preference order; split into available vs unavailable.
                    for (String location : currentLocationInfo.preferredLocations) {
                        Utils.ValueHolder<URI> endpoint = new Utils.ValueHolder<>();
                        if (Utils.tryGetValue(endpointsByLocation, location, endpoint)) {
                            if (this.isEndpointUnavailable(endpoint.v, expectedAvailableOperation)) {
                                unavailableEndpoints.add(endpoint.v);
                            } else {
                                endpoints.add(endpoint.v);
                            }
                        }
                    }
                } else {
                    // No preferences configured: walk account-level region order.
                    for (String location : orderedLocations) {
                        Utils.ValueHolder<URI> endpoint = Utils.ValueHolder.initialize(null);
                        if (Utils.tryGetValue(endpointsByLocation, location, endpoint)) {
                            if (this.defaultEndpoint.equals(endpoint.v)) {
                                // Default endpoint is regional here; collapse to it alone.
                                endpoints = new ArrayList<>();
                                break;
                            }

                            if (this.isEndpointUnavailable(endpoint.v, expectedAvailableOperation)) {
                                unavailableEndpoints.add(endpoint.v);
                            } else {
                                endpoints.add(endpoint.v);
                            }
                        }
                    }
                }

                if (endpoints.isEmpty()) {
                    endpoints.add(fallbackEndpoint);
                }

                // Unavailable endpoints are kept at the tail so retries can still reach them.
                endpoints.addAll(unavailableEndpoints);
            } else {
                for (String location : orderedLocations) {
                    Utils.ValueHolder<URI> endpoint = Utils.ValueHolder.initialize(null);
                    if (!Strings.isNullOrEmpty(location) && Utils.tryGetValue(endpointsByLocation, location, endpoint)) {
                        endpoints.add(endpoint.v);
                    }
                }
            }
        }

        if (endpoints.isEmpty()) {
            endpoints.add(fallbackEndpoint);
        }

        return new UnmodifiableList<URI>(endpoints);
    }

    /**
     * Builds (location name -> endpoint) and (endpoint -> location name) maps from the
     * account's location list; malformed entries are skipped with a warning. Location
     * names and endpoint URIs are lower-cased; lookups are case-insensitive.
     *
     * NOTE(review): regionByEndpoint uses CaseInsensitiveMap with URI keys — the
    * case-insensitivity applies to String keys; verify this behaves as a plain map here.
     *
     * @param orderedLocations out: location names in account order
     * @param regionMap out: endpoint -> region name map
     * @return location name -> endpoint map
     */
    private UnmodifiableMap<String, URI> getEndpointByLocation(Iterable<DatabaseAccountLocation> locations,
                                                               Utils.ValueHolder<UnmodifiableList<String>> orderedLocations,
                                                               Utils.ValueHolder<UnmodifiableMap<URI, String>> regionMap) {
        Map<String, URI> endpointsByLocation = new CaseInsensitiveMap<>();
        Map<URI, String> regionByEndpoint = new CaseInsensitiveMap<>();
        List<String> parsedLocations = new ArrayList<>();

        for (DatabaseAccountLocation location : locations) {
            if (!Strings.isNullOrEmpty(location.getName())) {
                try {
                    URI endpoint = new URI(location.getEndpoint().toLowerCase(Locale.ROOT));
                    endpointsByLocation.put(location.getName().toLowerCase(Locale.ROOT), endpoint);
                    regionByEndpoint.put(endpoint, location.getName().toLowerCase(Locale.ROOT));
                    parsedLocations.add(location.getName());
                } catch (Exception e) {
                    logger.warn("GetAvailableEndpointsByLocation() - skipping add for location = [{}] as it is location name is either empty or endpoint is malformed [{}]",
                        location.getName(),
                        location.getEndpoint());
                }
            }
        }

        orderedLocations.v = new UnmodifiableList<String>(parsedLocations);
        regionMap.v = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(regionByEndpoint);

        return (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(endpointsByLocation);
    }

    // Both the client setting AND the account-level capability must allow it.
    public boolean canUseMultipleWriteLocations() {
        return this.useMultipleWriteLocations && this.enableMultipleWriteLocations;
    }

    // Multi-region writes apply only to documents and to sproc execution.
    public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) {
        return this.canUseMultipleWriteLocations()
            && (request.getResourceType() == ResourceType.Document
                || (request.getResourceType() == ResourceType.StoredProcedure
                    && request.getOperationType() == com.azure.cosmos.implementation.OperationType.ExecuteJavaScript));
    }

    // Mutable record of when an endpoint was last marked unavailable and for which
    // operation types; entries expire via durationPassed checks.
    private static class LocationUnavailabilityInfo {
        LocationUnavailabilityInfo(Instant instant, OperationType type) {
            this.lastUnavailabilityCheckTimeStamp = instant;
            this.unavailableOperations = type;
        }

        public Instant lastUnavailabilityCheckTimeStamp;
        public OperationType unavailableOperations;
    }

    // Bit-flag operation set: Read = 0x1, Write = 0x2, ReadAndWrite = both.
    private enum OperationType {
        None(0x0),
        Read(0x1),
        Write(0x2),
        ReadAndWrite(0x3);

        private final int flag;

        public boolean hasReadFlag() {
            return (flag & Read.flag) != 0;
        }

        public boolean hasWriteFlag() {
            return (flag & Write.flag) != 0;
        }

        // Union of two operation sets.
        public static OperationType combine(OperationType t1, OperationType t2) {
            switch (t1.flag | t2.flag) {
                case 0x0:
                    return None;
                case 0x1:
                    return Read;
                case 0x2:
                    return Write;
                default:
                    return ReadAndWrite;
            }
        }

        // True when this set includes the given operation type.
        public boolean supports(OperationType type) {
            return (flag & type.flag) != 0;
        }

        OperationType(int flag) {
            this.flag = flag;
        }
    }

    private boolean durationPassed(Instant end, Instant start, Duration duration) {
        return end.minus(duration).isAfter(start);
    }

    private boolean unavailableLocationsExpirationTimePassed() {
        return durationPassed(Instant.now(), this.lastCacheUpdateTimestamp, this.unavailableLocationsExpirationTime);
    }

    private static boolean isExcludedRegionsSupplierConfigured(Supplier<CosmosExcludedRegions> excludedRegionsSupplier) {
        return excludedRegionsSupplier != null && excludedRegionsSupplier.get() != null;
    }

    /**
     * Immutable-by-convention snapshot of all location lookup state. Instances are built
     * while holding the cache lock and published by a single reference assignment; the
     * copy constructor supports the copy-on-write update in updateLocationCache.
     */
    static class DatabaseAccountLocationsInfo {
        // Client-configured preferred regions (lower-cased).
        private UnmodifiableList<String> preferredLocations;
        // Derived preferred regions when the client configured none (see updateLocationCache).
        private UnmodifiableList<String> effectivePreferredLocations;
        // Account-level region names, in account order.
        private UnmodifiableList<String> availableWriteLocations;
        private UnmodifiableList<String> availableReadLocations;
        // Region name -> endpoint (case-insensitive keys).
        private UnmodifiableMap<String, URI> availableWriteEndpointByLocation;
        private UnmodifiableMap<String, URI> availableReadEndpointByLocation;
        // Endpoint -> region name.
        private UnmodifiableMap<URI, String> regionNameByWriteEndpoint;
        private UnmodifiableMap<URI, String> regionNameByReadEndpoint;
        // Preference-ordered endpoint lists actually used for routing.
        private UnmodifiableList<URI> writeEndpoints;
        private UnmodifiableList<URI> readEndpoints;

        public DatabaseAccountLocationsInfo(List<String> preferredLocations, URI defaultEndpoint) {
            this.preferredLocations = new UnmodifiableList<>(preferredLocations.stream().map(loc -> loc.toLowerCase(Locale.ROOT)).collect(Collectors.toList()));
            this.effectivePreferredLocations = new UnmodifiableList<>(Collections.emptyList());
            this.availableWriteEndpointByLocation
                = (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(new CaseInsensitiveMap<>());
            this.availableReadEndpointByLocation
                = (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(new CaseInsensitiveMap<>());
            this.regionNameByWriteEndpoint
                = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(new CaseInsensitiveMap<>());
            this.regionNameByReadEndpoint
                = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(new CaseInsensitiveMap<>());
            this.availableReadLocations = new UnmodifiableList<>(Collections.emptyList());
            this.availableWriteLocations = new UnmodifiableList<>(Collections.emptyList());
            // Before the first account read, both lists contain only the default endpoint.
            this.readEndpoints = new UnmodifiableList<>(Collections.singletonList(defaultEndpoint));
            this.writeEndpoints = new UnmodifiableList<>(Collections.singletonList(defaultEndpoint));
        }

        // Shallow copy; fields are replaced wholesale during an update, never mutated.
        public DatabaseAccountLocationsInfo(DatabaseAccountLocationsInfo other) {
            this.preferredLocations = other.preferredLocations;
            this.effectivePreferredLocations = other.effectivePreferredLocations;
            this.availableWriteLocations = other.availableWriteLocations;
            this.availableReadLocations = other.availableReadLocations;
            this.availableWriteEndpointByLocation = other.availableWriteEndpointByLocation;
            this.regionNameByWriteEndpoint = other.regionNameByWriteEndpoint;
            this.regionNameByReadEndpoint = other.regionNameByReadEndpoint;
            this.availableReadEndpointByLocation = other.availableReadEndpointByLocation;
            this.writeEndpoints = other.writeEndpoints;
            this.readEndpoints = other.readEndpoints;
        }
    }
}
Will address it shortly.
/**
 * Decides whether the endpoint cache should be refreshed from the service.
 *
 * Returns true when the first read/write endpoint in use does not match the most
 * preferred location, or is marked unavailable. When the client configured no preferred
 * regions, the "most preferred" location is derived from the region of the first
 * account-level read/write endpoint currently in use.
 *
 * @param canRefreshInBackground out-parameter: true when the refresh may happen in the
 *        background (some alternative endpoint is still available); false forces a
 *        blocking refresh.
 * @return true when a refresh is needed
 */
public boolean shouldRefreshEndpoints(Utils.ValueHolder<Boolean> canRefreshInBackground) {
    canRefreshInBackground.v = true;
    DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo;

    String mostPreferredLocation = Utils.firstOrDefault(currentLocationInfo.preferredLocations);

    String mostPreferredEffectiveReadLocation = StringUtils.EMPTY;
    String mostPreferredEffectiveWriteLocation = StringUtils.EMPTY;

    if (StringUtils.isEmpty(mostPreferredLocation)) {
        // No client preference: fall back to the region of the first endpoint currently
        // in use (read and write resolved independently).
        URI firstAccountLevelReadEndpoint = Utils.firstOrDefault(currentLocationInfo.readEndpoints);

        if (currentLocationInfo.regionNameByReadEndpoint != null) {
            mostPreferredEffectiveReadLocation
                = currentLocationInfo.regionNameByReadEndpoint.getOrDefault(firstAccountLevelReadEndpoint, StringUtils.EMPTY);
        }

        URI firstAccountLevelWriteEndpoint = Utils.firstOrDefault(currentLocationInfo.writeEndpoints);

        if (currentLocationInfo.regionNameByWriteEndpoint != null) {
            mostPreferredEffectiveWriteLocation
                = currentLocationInfo.regionNameByWriteEndpoint.getOrDefault(firstAccountLevelWriteEndpoint, StringUtils.EMPTY);
        }
    }

    String mostPreferredReadLocationToUse = !Strings.isNullOrEmpty(mostPreferredLocation) ? mostPreferredLocation : mostPreferredEffectiveReadLocation;
    String mostPreferredWriteLocationToUse = !Strings.isNullOrEmpty(mostPreferredLocation) ? mostPreferredLocation : mostPreferredEffectiveWriteLocation;

    if (this.enableEndpointDiscovery) {
        // Refresh when the client wants multi-region writes but the account capability
        // has not been observed yet.
        boolean shouldRefresh = this.useMultipleWriteLocations && !this.enableMultipleWriteLocations;

        List<URI> readLocationEndpoints = currentLocationInfo.readEndpoints;

        if (this.isEndpointUnavailable(readLocationEndpoints.get(0), OperationType.Read)) {
            canRefreshInBackground.v = anyEndpointsAvailable(readLocationEndpoints, OperationType.Read);
            logger.debug("shouldRefreshEndpoints = true, since the first read endpoint " +
                "[{}] is not available for read. canRefreshInBackground = [{}]",
                readLocationEndpoints.get(0),
                canRefreshInBackground.v);
            return true;
        }

        if (!Strings.isNullOrEmpty(mostPreferredReadLocationToUse)) {
            Utils.ValueHolder<URI> mostPreferredReadEndpointHolder = new Utils.ValueHolder<>();
            Utils.ValueHolder<URI> firstAccountLevelReadEndpointHolder = new Utils.ValueHolder<>();
            logger.debug("getReadEndpoints [{}]", readLocationEndpoints);

            if (Utils.tryGetValue(currentLocationInfo.availableReadEndpointByLocation, mostPreferredReadLocationToUse, mostPreferredReadEndpointHolder)) {
                logger.debug("most preferred is [{}], most preferred available is [{}]",
                    mostPreferredReadLocationToUse, mostPreferredReadEndpointHolder.v);
                if (!areEqual(mostPreferredReadEndpointHolder.v, readLocationEndpoints.get(0))) {
                    // First endpoint in use is not the preferred one.
                    logger.debug("shouldRefreshEndpoints = true, most preferred location [{}]" +
                        " is not available for read.", mostPreferredReadLocationToUse);
                    return true;
                } else if (currentLocationInfo.preferredLocations == null || currentLocationInfo.preferredLocations.isEmpty()) {
                    // No client preference: additionally compare against the first
                    // account-level read region.
                    List<String> accountLevelReadRegions = currentLocationInfo.availableReadLocations;
                    String firstAccountLevelReadRegion = Utils.firstOrDefault(accountLevelReadRegions);

                    if (Utils.tryGetValue(currentLocationInfo.availableReadEndpointByLocation, firstAccountLevelReadRegion, firstAccountLevelReadEndpointHolder)) {
                        if (!areEqual(mostPreferredReadEndpointHolder.v, firstAccountLevelReadEndpointHolder.v)) {
                            logger.debug("shouldRefreshEndpoints = true, preferredRegions on client is empty and first account-level read region [{}]" +
                                " is not available for read.", firstAccountLevelReadRegion);
                            return true;
                        }
                    }
                }

                logger.debug("most preferred is [{}], and most preferred available [{}] are the same",
                    mostPreferredLocation, mostPreferredReadEndpointHolder.v);
            } else {
                // Preferred read region not in the account's read map.
                logger.debug("shouldRefreshEndpoints = true, most preferred location [{}] " +
                    "is not in available read locations.", mostPreferredLocation);
                return true;
            }
        }

        Utils.ValueHolder<URI> mostPreferredWriteEndpointHolder = new Utils.ValueHolder<>();
        List<URI> writeLocationEndpoints = currentLocationInfo.writeEndpoints;
        logger.debug("getWriteEndpoints [{}]", writeLocationEndpoints);

        if (!this.canUseMultipleWriteLocations()) {
            // Single write region: only its availability matters.
            if (this.isEndpointUnavailable(writeLocationEndpoints.get(0), OperationType.Write)) {
                canRefreshInBackground.v = anyEndpointsAvailable(writeLocationEndpoints, OperationType.Write);
                logger.debug("shouldRefreshEndpoints = true, most preferred location " +
                    "[{}] endpoint [{}] is not available for write. canRefreshInBackground = [{}]",
                    mostPreferredLocation,
                    writeLocationEndpoints.get(0),
                    canRefreshInBackground.v);
                return true;
            } else {
                logger.debug("shouldRefreshEndpoints: false, [{}] is available for Write", writeLocationEndpoints.get(0));
                return shouldRefresh;
            }
        } else if (!Strings.isNullOrEmpty(mostPreferredWriteLocationToUse)) {
            if (Utils.tryGetValue(currentLocationInfo.availableWriteEndpointByLocation, mostPreferredWriteLocationToUse, mostPreferredWriteEndpointHolder)) {
                shouldRefresh = !areEqual(mostPreferredWriteEndpointHolder.v, writeLocationEndpoints.get(0));

                if (currentLocationInfo.preferredLocations == null || currentLocationInfo.preferredLocations.isEmpty()) {
                    // Same account-level comparison as reads, for the write side.
                    List<String> accountLevelWriteRegions = currentLocationInfo.availableWriteLocations;
                    String firstAccountLevelWriteRegion = Utils.firstOrDefault(accountLevelWriteRegions);
                    Utils.ValueHolder<URI> firstAccountLevelWriteEndpointHolder = new Utils.ValueHolder<>();

                    if (Utils.tryGetValue(currentLocationInfo.availableWriteEndpointByLocation, firstAccountLevelWriteRegion, firstAccountLevelWriteEndpointHolder)) {
                        if (!areEqual(mostPreferredWriteEndpointHolder.v, firstAccountLevelWriteEndpointHolder.v)) {
                            logger.debug("shouldRefreshEndpoints = true, preferredRegions on client is empty and first account-level write region [{}]" +
                                " is not available for write.", firstAccountLevelWriteRegion);
                            shouldRefresh = true;
                        }
                    }
                }

                if (shouldRefresh) {
                    logger.debug("shouldRefreshEndpoints: true, write endpoint [{}] is not the same as most preferred [{}]",
                        writeLocationEndpoints.get(0), mostPreferredWriteEndpointHolder.v);
                } else {
                    logger.debug("shouldRefreshEndpoints: false, write endpoint [{}] is the same as most preferred [{}]",
                        writeLocationEndpoints.get(0), mostPreferredWriteEndpointHolder.v);
                }

                return shouldRefresh;
            } else {
                logger.debug("shouldRefreshEndpoints = true, most preferred location [{}] is not in available write locations",
                    mostPreferredLocation);
                return true;
            }
        } else {
            logger.debug("shouldRefreshEndpoints: false, mostPreferredLocation [{}] is empty", mostPreferredLocation);
            return shouldRefresh;
        }
    } else {
        logger.debug("shouldRefreshEndpoints: false, endpoint discovery not enabled");
        return false;
    }
}
String mostPreferredEffectiveReadLocation = StringUtils.EMPTY;
/**
 * Decides whether the cached read/write endpoint lists should be refreshed from the service,
 * and whether that refresh may happen in the background.
 *
 * @param canRefreshInBackground out-parameter; initialized to {@code true}, and downgraded to the
 *        result of {@code anyEndpointsAvailable(...)} when the first read (or write) endpoint is
 *        marked unavailable — i.e. a foreground refresh is forced when no endpoint is available.
 * @return {@code true} if the endpoint cache is stale and must be refreshed; {@code false} otherwise.
 *         Always {@code false} when endpoint discovery is disabled.
 */
public boolean shouldRefreshEndpoints(Utils.ValueHolder<Boolean> canRefreshInBackground) {
    canRefreshInBackground.v = true;
    // Snapshot the current locations info; this.locationInfo may be swapped concurrently.
    DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo;
    String mostPreferredLocation = Utils.firstOrDefault(currentLocationInfo.preferredLocations);
    // Fall back to the effective preferred locations when the client configured none.
    if (StringUtils.isEmpty(mostPreferredLocation)) {
        mostPreferredLocation = Utils.firstOrDefault(currentLocationInfo.effectivePreferredLocations);
    }
    if (this.enableEndpointDiscovery) {
        // Keep refreshing while the client wants multi-write but the account-level
        // multi-write flag has not (yet) been observed as enabled.
        boolean shouldRefresh = this.useMultipleWriteLocations && !this.enableMultipleWriteLocations;

        // --- Read path: refresh if the first (most preferred) read endpoint is unavailable. ---
        List<URI> readLocationEndpoints = currentLocationInfo.readEndpoints;
        if (this.isEndpointUnavailable(readLocationEndpoints.get(0), OperationType.Read)) {
            // Background refresh only if at least one read endpoint remains available.
            canRefreshInBackground.v = anyEndpointsAvailable(readLocationEndpoints, OperationType.Read);
            logger.debug("shouldRefreshEndpoints = true, since the first read endpoint "
                + "[{}] is not available for read. canRefreshInBackground = [{}]",
                readLocationEndpoints.get(0),
                canRefreshInBackground.v);
            return true;
        }

        if (!Strings.isNullOrEmpty(mostPreferredLocation)) {
            Utils.ValueHolder<URI> mostPreferredReadEndpointHolder = new Utils.ValueHolder<>();
            logger.debug("getReadEndpoints [{}]", readLocationEndpoints);
            if (Utils.tryGetValue(currentLocationInfo.availableReadEndpointByLocation,
                                  mostPreferredLocation, mostPreferredReadEndpointHolder)) {
                logger.debug("most preferred is [{}], most preferred available is [{}]",
                    mostPreferredLocation, mostPreferredReadEndpointHolder.v);
                // Refresh if the ordered read list does not start with the preferred region's endpoint.
                if (!areEqual(mostPreferredReadEndpointHolder.v, readLocationEndpoints.get(0))) {
                    logger.debug("shouldRefreshEndpoints = true, most preferred location [{}]"
                        + " is not available for read.", mostPreferredLocation);
                    return true;
                }
                logger.debug("most preferred is [{}], and most preferred available [{}] are the same",
                    mostPreferredLocation, mostPreferredReadEndpointHolder.v);
            } else {
                // Preferred region not known to the account at all — refresh to re-learn topology.
                logger.debug("shouldRefreshEndpoints = true, most preferred location [{}] "
                    + "is not in available read locations.", mostPreferredLocation);
                return true;
            }
        }

        // --- Write path. ---
        Utils.ValueHolder<URI> mostPreferredWriteEndpointHolder = new Utils.ValueHolder<>();
        List<URI> writeLocationEndpoints = currentLocationInfo.writeEndpoints;
        logger.debug("getWriteEndpoints [{}]", writeLocationEndpoints);
        if (!this.canUseMultipleWriteLocations()) {
            // Single-write account: only the first write endpoint matters.
            if (this.isEndpointUnavailable(writeLocationEndpoints.get(0), OperationType.Write)) {
                canRefreshInBackground.v = anyEndpointsAvailable(writeLocationEndpoints, OperationType.Write);
                logger.debug("shouldRefreshEndpoints = true, most preferred location "
                    + "[{}] endpoint [{}] is not available for write. canRefreshInBackground = [{}]",
                    mostPreferredLocation,
                    writeLocationEndpoints.get(0),
                    canRefreshInBackground.v);
                return true;
            } else {
                logger.debug("shouldRefreshEndpoints: false, [{}] is available for Write",
                    writeLocationEndpoints.get(0));
                return shouldRefresh;
            }
        } else if (!Strings.isNullOrEmpty(mostPreferredLocation)) {
            // Multi-write account: refresh if the first write endpoint is not the preferred one.
            if (Utils.tryGetValue(currentLocationInfo.availableWriteEndpointByLocation,
                                  mostPreferredLocation, mostPreferredWriteEndpointHolder)) {
                shouldRefresh = !areEqual(mostPreferredWriteEndpointHolder.v, writeLocationEndpoints.get(0));
                if (shouldRefresh) {
                    logger.debug("shouldRefreshEndpoints: true, write endpoint [{}] is not the same as most preferred [{}]",
                        writeLocationEndpoints.get(0), mostPreferredWriteEndpointHolder.v);
                } else {
                    logger.debug("shouldRefreshEndpoints: false, write endpoint [{}] is the same as most preferred [{}]",
                        writeLocationEndpoints.get(0), mostPreferredWriteEndpointHolder.v);
                }
                return shouldRefresh;
            } else {
                logger.debug("shouldRefreshEndpoints = true, most preferred location [{}] is not in available write locations",
                    mostPreferredLocation);
                return true;
            }
        } else {
            logger.debug("shouldRefreshEndpoints: false, mostPreferredLocation [{}] is empty",
                mostPreferredLocation);
            return shouldRefresh;
        }
    } else {
        logger.debug("shouldRefreshEndpoints: false, endpoint discovery not enabled");
        return false;
    }
}
class LocationCache { private final static Logger logger = LoggerFactory.getLogger(LocationCache.class); private final boolean enableEndpointDiscovery; private final URI defaultEndpoint; private final boolean useMultipleWriteLocations; private final Object lockObject; private final Duration unavailableLocationsExpirationTime; private final ConcurrentHashMap<URI, LocationUnavailabilityInfo> locationUnavailabilityInfoByEndpoint; private final ConnectionPolicy connectionPolicy; private DatabaseAccountLocationsInfo locationInfo; private Instant lastCacheUpdateTimestamp; private boolean enableMultipleWriteLocations; public LocationCache( ConnectionPolicy connectionPolicy, URI defaultEndpoint, Configs configs) { List<String> preferredLocations = new ArrayList<>(connectionPolicy.getPreferredRegions() != null ? connectionPolicy.getPreferredRegions() : Collections.emptyList() ); this.locationInfo = new DatabaseAccountLocationsInfo(preferredLocations, defaultEndpoint); this.defaultEndpoint = defaultEndpoint; this.enableEndpointDiscovery = connectionPolicy.isEndpointDiscoveryEnabled(); this.useMultipleWriteLocations = connectionPolicy.isMultipleWriteRegionsEnabled(); this.lockObject = new Object(); this.locationUnavailabilityInfoByEndpoint = new ConcurrentHashMap<>(); this.lastCacheUpdateTimestamp = Instant.MIN; this.enableMultipleWriteLocations = false; this.unavailableLocationsExpirationTime = Duration.ofSeconds(configs.getUnavailableLocationsExpirationTimeInSeconds()); this.connectionPolicy = connectionPolicy; } /** * Gets list of read endpoints ordered by * * 1. Preferred location * 2. Endpoint availability * @return */ public UnmodifiableList<URI> getReadEndpoints() { if (this.locationUnavailabilityInfoByEndpoint.size() > 0 && unavailableLocationsExpirationTimePassed()) { this.updateLocationCache(); } return this.locationInfo.readEndpoints; } /** * Gets list of write endpoints ordered by * 1. Preferred location * 2. 
Endpoint availability * @return */ public UnmodifiableList<URI> getWriteEndpoints() { if (this.locationUnavailabilityInfoByEndpoint.size() > 0 && unavailableLocationsExpirationTimePassed()) { this.updateLocationCache(); } return this.locationInfo.writeEndpoints; } /*** * Get the list of available read endpoints. * The list will not be filtered by preferred region list. * * This method is ONLY used for fault injection. * @return */ public List<URI> getAvailableReadEndpoints() { return this.locationInfo.availableReadEndpointByLocation.values().stream().collect(Collectors.toList()); } /*** * Get the list of available write endpoints. * The list will not be filtered by preferred region list. * * This method is ONLY used for fault injection. * @return */ public List<URI> getAvailableWriteEndpoints() { return this.locationInfo.availableWriteEndpointByLocation.values().stream().collect(Collectors.toList()); } /** * Marks the current location unavailable for read */ public void markEndpointUnavailableForRead(URI endpoint) { this.markEndpointUnavailable(endpoint, OperationType.Read); } /** * Marks the current location unavailable for write */ public void markEndpointUnavailableForWrite(URI endpoint) { this.markEndpointUnavailable(endpoint, OperationType.Write); } /** * Invoked when {@link DatabaseAccount} is read * @param databaseAccount READ DatabaseAccount */ public void onDatabaseAccountRead(DatabaseAccount databaseAccount) { this.updateLocationCache( databaseAccount.getWritableLocations(), databaseAccount.getReadableLocations(), null, BridgeInternal.isEnableMultipleWriteLocations(databaseAccount)); } void onLocationPreferenceChanged(UnmodifiableList<String> preferredLocations) { this.updateLocationCache( null, null , preferredLocations, null); } /** * Resolves request to service endpoint. * 1. If this is a write request * (a) If UseMultipleWriteLocations = true * (i) For document writes, resolve to most preferred and available write endpoint. 
* Once the endpoint is marked unavailable, it is moved to the end of available write endpoint. Current request will * be retried on next preferred available write endpoint. * (ii) For all other resources, always resolve to first/second (regardless of preferred locations) * write getEndpoint in {@link DatabaseAccount * Endpoint of first write location in {@link DatabaseAccount * write operation on all resource types (except during that region's failover). * Only during manual failover, client would retry write on second write location in {@link DatabaseAccount * (b) Else resolve the request to first write getEndpoint in {@link DatabaseAccount * second write getEndpoint in {@link DatabaseAccount * 2. Else resolve the request to most preferred available read getEndpoint (getAutomatic failover for read requests) * @param request Request for which getEndpoint is to be resolved * @return Resolved getEndpoint */ public URI resolveServiceEndpoint(RxDocumentServiceRequest request) { Objects.requireNonNull(request.requestContext, "RxDocumentServiceRequest.requestContext is required and cannot be null."); if(request.requestContext.locationEndpointToRoute != null) { return request.requestContext.locationEndpointToRoute; } int locationIndex = Utils.getValueOrDefault(request.requestContext.locationIndexToRoute, 0); boolean usePreferredLocations = request.requestContext.usePreferredLocations != null ? 
request.requestContext.usePreferredLocations : true; if(!usePreferredLocations || (request.getOperationType().isWriteOperation() && !this.canUseMultipleWriteLocations(request))) { DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo; if(this.enableEndpointDiscovery && currentLocationInfo.availableWriteLocations.size() > 0) { locationIndex = Math.min(locationIndex%2, currentLocationInfo.availableWriteLocations.size()-1); String writeLocation = currentLocationInfo.availableWriteLocations.get(locationIndex); return currentLocationInfo.availableWriteEndpointByLocation.get(writeLocation); } else { return this.defaultEndpoint; } } else { UnmodifiableList<URI> endpoints = request.getOperationType().isWriteOperation()? this.getApplicableWriteEndpoints(request) : this.getApplicableReadEndpoints(request); return endpoints.get(locationIndex % endpoints.size()); } } public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) { return this.getApplicableWriteEndpoints(request.requestContext.getExcludeRegions(), request.requestContext.getUnavailableRegionsForPartition()); } public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegionsOnRequest, List<String> unavailableRegionsForPartition) { UnmodifiableList<URI> writeEndpoints = this.getWriteEndpoints(); Supplier<CosmosExcludedRegions> excludedRegionsSupplier = this.connectionPolicy.getExcludedRegionsSupplier(); List<String> effectiveExcludedRegions = isExcludedRegionsSupplierConfigured(excludedRegionsSupplier) ? 
new ArrayList<>(excludedRegionsSupplier.get().getExcludedRegions()) : Collections.emptyList(); if (!isExcludeRegionsConfigured(excludedRegionsOnRequest, effectiveExcludedRegions) && (unavailableRegionsForPartition == null || unavailableRegionsForPartition.isEmpty())) { return writeEndpoints; } if (excludedRegionsOnRequest != null && !excludedRegionsOnRequest.isEmpty()) { effectiveExcludedRegions = excludedRegionsOnRequest; } List<String> effectiveExcludedRegionsWithPartitionUnavailableRegions = new ArrayList<>(effectiveExcludedRegions); if (unavailableRegionsForPartition != null) { effectiveExcludedRegionsWithPartitionUnavailableRegions.addAll(unavailableRegionsForPartition); } return this.getApplicableEndpoints( writeEndpoints, this.locationInfo.regionNameByWriteEndpoint, this.defaultEndpoint, effectiveExcludedRegionsWithPartitionUnavailableRegions); } public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) { return this.getApplicableReadEndpoints(request.requestContext.getExcludeRegions(), request.requestContext.getUnavailableRegionsForPartition()); } public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegionsOnRequest, List<String> unavailableRegionsForPartition) { UnmodifiableList<URI> readEndpoints = this.getReadEndpoints(); Supplier<CosmosExcludedRegions> excludedRegionsSupplier = this.connectionPolicy.getExcludedRegionsSupplier(); List<String> effectiveExcludedRegions = isExcludedRegionsSupplierConfigured(excludedRegionsSupplier) ? 
new ArrayList<>(excludedRegionsSupplier.get().getExcludedRegions()) : Collections.emptyList(); if (!isExcludeRegionsConfigured(excludedRegionsOnRequest, effectiveExcludedRegions) && (unavailableRegionsForPartition == null || unavailableRegionsForPartition.isEmpty())) { return readEndpoints; } if (excludedRegionsOnRequest != null && !excludedRegionsOnRequest.isEmpty()) { effectiveExcludedRegions = excludedRegionsOnRequest; } List<String> effectiveExcludedRegionsWithPartitionUnavailableRegions = new ArrayList<>(effectiveExcludedRegions); if (unavailableRegionsForPartition != null) { effectiveExcludedRegionsWithPartitionUnavailableRegions.addAll(unavailableRegionsForPartition); } return this.getApplicableEndpoints( readEndpoints, this.locationInfo.regionNameByReadEndpoint, this.locationInfo.writeEndpoints.get(0), effectiveExcludedRegionsWithPartitionUnavailableRegions); } private UnmodifiableList<URI> getApplicableEndpoints( UnmodifiableList<URI> endpoints, UnmodifiableMap<URI, String> regionNameByEndpoint, URI fallbackEndpoint, List<String> excludeRegionList) { List<URI> applicableEndpoints = new ArrayList<>(); for (URI endpoint : endpoints) { Utils.ValueHolder<String> regionName = new Utils.ValueHolder<>(); if (Utils.tryGetValue(regionNameByEndpoint, endpoint, regionName)) { if (!excludeRegionList.stream().anyMatch(regionName.v::equalsIgnoreCase)) { applicableEndpoints.add(endpoint); } } } if (applicableEndpoints.isEmpty()) { applicableEndpoints.add(fallbackEndpoint); } return new UnmodifiableList<>(applicableEndpoints); } private boolean isExcludeRegionsConfigured(List<String> excludedRegionsOnRequest, List<String> excludedRegionsOnClient) { boolean isExcludedRegionsConfiguredOnRequest = !(excludedRegionsOnRequest == null || excludedRegionsOnRequest.isEmpty()); boolean isExcludedRegionsConfiguredOnClient = !(excludedRegionsOnClient == null || excludedRegionsOnClient.isEmpty()); return isExcludedRegionsConfiguredOnRequest || isExcludedRegionsConfiguredOnClient; } 
public URI resolveFaultInjectionEndpoint(String region, boolean writeOnly) { Utils.ValueHolder<URI> endpointValueHolder = new Utils.ValueHolder<>(); if (writeOnly) { Utils.tryGetValue(this.locationInfo.availableWriteEndpointByLocation, region, endpointValueHolder); } else { Utils.tryGetValue(this.locationInfo.availableReadEndpointByLocation, region, endpointValueHolder); } if (endpointValueHolder.v != null) { return endpointValueHolder.v; } throw new IllegalArgumentException("Can not find service endpoint for region " + region); } public URI getDefaultEndpoint() { return this.defaultEndpoint; } public String getRegionName(URI locationEndpoint, com.azure.cosmos.implementation.OperationType operationType) { Utils.ValueHolder<String> regionName = new Utils.ValueHolder<>(); if (operationType.isWriteOperation()) { if (Utils.tryGetValue(this.locationInfo.regionNameByWriteEndpoint, locationEndpoint, regionName)) { return regionName.v; } } else { if (Utils.tryGetValue(this.locationInfo.regionNameByReadEndpoint, locationEndpoint, regionName)) { return regionName.v; } } return this.locationInfo.availableWriteLocations.get(0).toLowerCase(Locale.ROOT); } private boolean areEqual(URI url1, URI url2) { return url1.equals(url2); } private void clearStaleEndpointUnavailabilityInfo() { if (!this.locationUnavailabilityInfoByEndpoint.isEmpty()) { List<URI> unavailableEndpoints = new ArrayList<>(this.locationUnavailabilityInfoByEndpoint.keySet()); for (URI unavailableEndpoint: unavailableEndpoints) { Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>(); Utils.ValueHolder<LocationUnavailabilityInfo> removedHolder = new Utils.ValueHolder<>(); if (Utils.tryGetValue(this.locationUnavailabilityInfoByEndpoint, unavailableEndpoint, unavailabilityInfoHolder) && durationPassed(Instant.now(), unavailabilityInfoHolder.v.lastUnavailabilityCheckTimeStamp, this.unavailableLocationsExpirationTime) && 
Utils.tryRemove(this.locationUnavailabilityInfoByEndpoint, unavailableEndpoint, removedHolder)) { logger.debug( "Removed endpoint [{}] unavailable for operations [{}] from unavailableEndpoints", unavailableEndpoint, unavailabilityInfoHolder.v.unavailableOperations); } } } } private boolean isEndpointUnavailable(URI endpoint, OperationType expectedAvailableOperations) { Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>(); if (expectedAvailableOperations == OperationType.None || !Utils.tryGetValue(this.locationUnavailabilityInfoByEndpoint, endpoint, unavailabilityInfoHolder) || !unavailabilityInfoHolder.v.unavailableOperations.supports(expectedAvailableOperations)) { return false; } else { if (durationPassed(Instant.now(), unavailabilityInfoHolder.v.lastUnavailabilityCheckTimeStamp, this.unavailableLocationsExpirationTime)) { return false; } else { logger.debug( "Endpoint [{}] unavailable for operations [{}] present in unavailableEndpoints", endpoint, unavailabilityInfoHolder.v.unavailableOperations); return true; } } } private boolean anyEndpointsAvailable(List<URI> endpoints, OperationType expectedAvailableOperations) { Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>(); boolean anyEndpointsAvailable = false; for (URI endpoint : endpoints) { if (!isEndpointUnavailable(endpoint, expectedAvailableOperations)) { anyEndpointsAvailable = true; break; } } return anyEndpointsAvailable; } private void markEndpointUnavailable( URI unavailableEndpoint, OperationType unavailableOperationType) { Instant currentTime = Instant.now(); LocationUnavailabilityInfo updatedInfo = this.locationUnavailabilityInfoByEndpoint.compute( unavailableEndpoint, new BiFunction<URI, LocationUnavailabilityInfo, LocationUnavailabilityInfo>() { @Override public LocationUnavailabilityInfo apply(URI url, LocationUnavailabilityInfo info) { if (info == null) { return new LocationUnavailabilityInfo(currentTime, 
unavailableOperationType); } else { info.lastUnavailabilityCheckTimeStamp = currentTime; info.unavailableOperations = OperationType.combine(info.unavailableOperations, unavailableOperationType); return info; } } }); this.updateLocationCache(); logger.debug( "Endpoint [{}] unavailable for [{}] added/updated to unavailableEndpoints with timestamp [{}]", unavailableEndpoint, unavailableOperationType, updatedInfo.lastUnavailabilityCheckTimeStamp); } private void updateLocationCache(){ updateLocationCache(null, null, null, null); } private void updateLocationCache( Iterable<DatabaseAccountLocation> writeLocations, Iterable<DatabaseAccountLocation> readLocations, UnmodifiableList<String> preferenceList, Boolean enableMultipleWriteLocations) { synchronized (this.lockObject) { DatabaseAccountLocationsInfo nextLocationInfo = new DatabaseAccountLocationsInfo(this.locationInfo); logger.debug("updating location cache ..., current readLocations [{}], current writeLocations [{}]", nextLocationInfo.readEndpoints, nextLocationInfo.writeEndpoints); if (preferenceList != null) { nextLocationInfo.preferredLocations = preferenceList; } if (enableMultipleWriteLocations != null) { this.enableMultipleWriteLocations = enableMultipleWriteLocations; } this.clearStaleEndpointUnavailabilityInfo(); if (readLocations != null) { Utils.ValueHolder<UnmodifiableList<String>> out = Utils.ValueHolder.initialize(nextLocationInfo.availableReadLocations); Utils.ValueHolder<UnmodifiableMap<URI, String>> outReadRegionMap = Utils.ValueHolder.initialize(nextLocationInfo.regionNameByReadEndpoint); nextLocationInfo.availableReadEndpointByLocation = this.getEndpointByLocation(readLocations, out, outReadRegionMap); nextLocationInfo.availableReadLocations = out.v; nextLocationInfo.regionNameByReadEndpoint = outReadRegionMap.v; } if (writeLocations != null) { Utils.ValueHolder<UnmodifiableList<String>> out = Utils.ValueHolder.initialize(nextLocationInfo.availableWriteLocations); 
Utils.ValueHolder<UnmodifiableMap<URI, String>> outWriteRegionMap = Utils.ValueHolder.initialize(nextLocationInfo.regionNameByWriteEndpoint); nextLocationInfo.availableWriteEndpointByLocation = this.getEndpointByLocation(writeLocations, out, outWriteRegionMap); nextLocationInfo.availableWriteLocations = out.v; nextLocationInfo.regionNameByWriteEndpoint = outWriteRegionMap.v; } nextLocationInfo.writeEndpoints = this.getPreferredAvailableEndpoints(nextLocationInfo.availableWriteEndpointByLocation, nextLocationInfo.availableWriteLocations, OperationType.Write, this.defaultEndpoint); nextLocationInfo.readEndpoints = this.getPreferredAvailableEndpoints(nextLocationInfo.availableReadEndpointByLocation, nextLocationInfo.availableReadLocations, OperationType.Read, nextLocationInfo.writeEndpoints.get(0)); this.lastCacheUpdateTimestamp = Instant.now(); logger.debug("updating location cache finished, new readLocations [{}], new writeLocations [{}]", nextLocationInfo.readEndpoints, nextLocationInfo.writeEndpoints); this.locationInfo = nextLocationInfo; } } private UnmodifiableList<URI> getPreferredAvailableEndpoints(UnmodifiableMap<String, URI> endpointsByLocation, UnmodifiableList<String> orderedLocations, OperationType expectedAvailableOperation, URI fallbackEndpoint) { List<URI> endpoints = new ArrayList<>(); DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo; if (this.enableEndpointDiscovery) { if (this.canUseMultipleWriteLocations() || expectedAvailableOperation.supports(OperationType.Read)) { List<URI> unavailableEndpoints = new ArrayList<>(); if (currentLocationInfo.preferredLocations != null && !currentLocationInfo.preferredLocations.isEmpty()) { for (String location: currentLocationInfo.preferredLocations) { Utils.ValueHolder<URI> endpoint = new Utils.ValueHolder<>(); if (Utils.tryGetValue(endpointsByLocation, location, endpoint)) { if (this.isEndpointUnavailable(endpoint.v, expectedAvailableOperation)) { unavailableEndpoints.add(endpoint.v); } else 
{ endpoints.add(endpoint.v); } } } } else { for (String location : orderedLocations) { Utils.ValueHolder<URI> endpoint = Utils.ValueHolder.initialize(null); if (Utils.tryGetValue(endpointsByLocation, location, endpoint)) { if (this.defaultEndpoint.equals(endpoint.v)) { endpoints = new ArrayList<>(); break; } if (this.isEndpointUnavailable(endpoint.v, expectedAvailableOperation)) { unavailableEndpoints.add(endpoint.v); } else { endpoints.add(endpoint.v); } } } } if (endpoints.isEmpty()) { endpoints.add(fallbackEndpoint); } endpoints.addAll(unavailableEndpoints); } else { for (String location : orderedLocations) { Utils.ValueHolder<URI> endpoint = Utils.ValueHolder.initialize(null); if (!Strings.isNullOrEmpty(location) && Utils.tryGetValue(endpointsByLocation, location, endpoint)) { endpoints.add(endpoint.v); } } } } if (endpoints.isEmpty()) { endpoints.add(fallbackEndpoint); } return new UnmodifiableList<URI>(endpoints); } private UnmodifiableMap<String, URI> getEndpointByLocation(Iterable<DatabaseAccountLocation> locations, Utils.ValueHolder<UnmodifiableList<String>> orderedLocations, Utils.ValueHolder<UnmodifiableMap<URI, String>> regionMap) { Map<String, URI> endpointsByLocation = new CaseInsensitiveMap<>(); Map<URI, String> regionByEndpoint = new CaseInsensitiveMap<>(); List<String> parsedLocations = new ArrayList<>(); for (DatabaseAccountLocation location: locations) { if (!Strings.isNullOrEmpty(location.getName())) { try { URI endpoint = new URI(location.getEndpoint().toLowerCase(Locale.ROOT)); endpointsByLocation.put(location.getName().toLowerCase(Locale.ROOT), endpoint); regionByEndpoint.put(endpoint, location.getName().toLowerCase(Locale.ROOT)); parsedLocations.add(location.getName()); } catch (Exception e) { logger.warn("GetAvailableEndpointsByLocation() - skipping add for location = [{}] as it is location name is either empty or endpoint is malformed [{}]", location.getName(), location.getEndpoint()); } } } orderedLocations.v = new 
UnmodifiableList<String>(parsedLocations); regionMap.v = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(regionByEndpoint); return (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(endpointsByLocation); } public boolean canUseMultipleWriteLocations() { return this.useMultipleWriteLocations && this.enableMultipleWriteLocations; } public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) { return this.canUseMultipleWriteLocations() && (request.getResourceType() == ResourceType.Document || (request.getResourceType() == ResourceType.StoredProcedure && request.getOperationType() == com.azure.cosmos.implementation.OperationType.ExecuteJavaScript)); } private static class LocationUnavailabilityInfo { LocationUnavailabilityInfo(Instant instant, OperationType type) { this.lastUnavailabilityCheckTimeStamp = instant; this.unavailableOperations = type; } public Instant lastUnavailabilityCheckTimeStamp; public OperationType unavailableOperations; } private enum OperationType { None(0x0), Read(0x1), Write(0x2), ReadAndWrite(0x3); private final int flag; public boolean hasReadFlag() { return (flag & Read.flag) != 0; } public boolean hasWriteFlag() { return (flag & Write.flag) != 0; } public static OperationType combine(OperationType t1, OperationType t2) { switch (t1.flag | t2.flag) { case 0x0: return None; case 0x1: return Read; case 0x2: return Write; default: return ReadAndWrite; } } public boolean supports(OperationType type) { return (flag & type.flag) != 0; } OperationType(int flag) { this.flag = flag; } } private boolean durationPassed(Instant end, Instant start, Duration duration) { return end.minus(duration).isAfter(start); } private boolean unavailableLocationsExpirationTimePassed() { return durationPassed(Instant.now(), this.lastCacheUpdateTimestamp, this.unavailableLocationsExpirationTime); } private static boolean isExcludedRegionsSupplierConfigured(Supplier<CosmosExcludedRegions> 
excludedRegionsSupplier) { return excludedRegionsSupplier != null && excludedRegionsSupplier.get() != null; } static class DatabaseAccountLocationsInfo { private UnmodifiableList<String> preferredLocations; private UnmodifiableList<String> availableWriteLocations; private UnmodifiableList<String> availableReadLocations; private UnmodifiableMap<String, URI> availableWriteEndpointByLocation; private UnmodifiableMap<String, URI> availableReadEndpointByLocation; private UnmodifiableMap<URI, String> regionNameByWriteEndpoint; private UnmodifiableMap<URI, String> regionNameByReadEndpoint; private UnmodifiableList<URI> writeEndpoints; private UnmodifiableList<URI> readEndpoints; public DatabaseAccountLocationsInfo(List<String> preferredLocations, URI defaultEndpoint) { this.preferredLocations = new UnmodifiableList<>(preferredLocations.stream().map(loc -> loc.toLowerCase(Locale.ROOT)).collect(Collectors.toList())); this.availableWriteEndpointByLocation = (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(new CaseInsensitiveMap<>()); this.availableReadEndpointByLocation = (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(new CaseInsensitiveMap<>()); this.regionNameByWriteEndpoint = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(new CaseInsensitiveMap<>()); this.regionNameByReadEndpoint = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(new CaseInsensitiveMap<>()); this.availableReadLocations = new UnmodifiableList<>(Collections.emptyList()); this.availableWriteLocations = new UnmodifiableList<>(Collections.emptyList()); this.readEndpoints = new UnmodifiableList<>(Collections.singletonList(defaultEndpoint)); this.writeEndpoints = new UnmodifiableList<>(Collections.singletonList(defaultEndpoint)); } public DatabaseAccountLocationsInfo(DatabaseAccountLocationsInfo other) { this.preferredLocations = other.preferredLocations; this.availableWriteLocations = 
other.availableWriteLocations; this.availableReadLocations = other.availableReadLocations; this.availableWriteEndpointByLocation = other.availableWriteEndpointByLocation; this.regionNameByWriteEndpoint = other.regionNameByWriteEndpoint; this.regionNameByReadEndpoint = other.regionNameByReadEndpoint; this.availableReadEndpointByLocation = other.availableReadEndpointByLocation; this.writeEndpoints = other.writeEndpoints; this.readEndpoints = other.readEndpoints; } } }
/**
 * Caches the account-level region/endpoint topology of a geo-replicated Cosmos DB account and
 * resolves requests to regional service endpoints.
 *
 * Responsibilities visible in this class:
 * - Maintains an immutable snapshot ({@link DatabaseAccountLocationsInfo}) of readable/writable
 *   regions and their endpoints, swapped atomically under {@code lockObject} in
 *   {@code updateLocationCache}.
 * - Tracks per-endpoint unavailability (read/write) with an expiration window
 *   ({@code unavailableLocationsExpirationTime}); expired entries are purged lazily.
 * - Orders endpoints by the client's preferred regions and endpoint availability, and applies
 *   client/request-level excluded regions plus per-partition unavailable regions.
 *
 * Thread-safety: reads operate on the current {@code locationInfo} snapshot; updates build a new
 * snapshot under {@code lockObject} and publish it by reference assignment.
 */
class LocationCache {
    private final static Logger logger = LoggerFactory.getLogger(LocationCache.class);

    private final boolean enableEndpointDiscovery;
    // The account's global/default endpoint; used as the fallback whenever discovery is off or no
    // regional endpoint is applicable.
    private final URI defaultEndpoint;
    private final boolean useMultipleWriteLocations;
    // Guards updateLocationCache's build-and-swap of locationInfo.
    private final Object lockObject;
    private final Duration unavailableLocationsExpirationTime;
    private final ConcurrentHashMap<URI, LocationUnavailabilityInfo> locationUnavailabilityInfoByEndpoint;
    private final ConnectionPolicy connectionPolicy;

    // Current immutable snapshot of the account topology; replaced wholesale on refresh.
    private DatabaseAccountLocationsInfo locationInfo;
    private Instant lastCacheUpdateTimestamp;
    // Account-level flag learned from the DatabaseAccount (distinct from the client-side
    // useMultipleWriteLocations preference); both must be true to use multiple write locations.
    private boolean enableMultipleWriteLocations;

    /**
     * Creates a location cache seeded with the default endpoint for both reads and writes.
     *
     * @param connectionPolicy source of preferred regions, endpoint-discovery and multi-write
     *                         client settings, and the excluded-regions supplier
     * @param defaultEndpoint  the account's global endpoint
     * @param configs          source of the unavailable-location expiration interval
     */
    public LocationCache(
        ConnectionPolicy connectionPolicy,
        URI defaultEndpoint,
        Configs configs) {
        List<String> preferredLocations = new ArrayList<>(
            connectionPolicy.getPreferredRegions() != null
                ? connectionPolicy.getPreferredRegions()
                : Collections.emptyList());

        this.locationInfo = new DatabaseAccountLocationsInfo(preferredLocations, defaultEndpoint);
        this.defaultEndpoint = defaultEndpoint;
        this.enableEndpointDiscovery = connectionPolicy.isEndpointDiscoveryEnabled();
        this.useMultipleWriteLocations = connectionPolicy.isMultipleWriteRegionsEnabled();
        this.lockObject = new Object();
        this.locationUnavailabilityInfoByEndpoint = new ConcurrentHashMap<>();
        this.lastCacheUpdateTimestamp = Instant.MIN;
        this.enableMultipleWriteLocations = false;
        this.unavailableLocationsExpirationTime =
            Duration.ofSeconds(configs.getUnavailableLocationsExpirationTimeInSeconds());
        this.connectionPolicy = connectionPolicy;
    }

    /**
     * Gets the list of read endpoints ordered by
     * 1. preferred location, 2. endpoint availability.
     *
     * Lazily rebuilds the cache first when unavailability records exist and the expiration
     * interval since the last rebuild has passed.
     *
     * @return ordered read endpoints from the current snapshot
     */
    public UnmodifiableList<URI> getReadEndpoints() {
        if (this.locationUnavailabilityInfoByEndpoint.size() > 0
            && unavailableLocationsExpirationTimePassed()) {
            this.updateLocationCache();
        }
        return this.locationInfo.readEndpoints;
    }

    /**
     * Gets the list of write endpoints ordered by
     * 1. preferred location, 2. endpoint availability.
     *
     * Lazily rebuilds the cache first when unavailability records exist and the expiration
     * interval since the last rebuild has passed.
     *
     * @return ordered write endpoints from the current snapshot
     */
    public UnmodifiableList<URI> getWriteEndpoints() {
        if (this.locationUnavailabilityInfoByEndpoint.size() > 0
            && unavailableLocationsExpirationTimePassed()) {
            this.updateLocationCache();
        }
        return this.locationInfo.writeEndpoints;
    }

    /***
     * Get the list of available read endpoints.
     * The list will not be filtered by the preferred region list.
     *
     * This method is ONLY used for fault injection.
     * @return all account-level read endpoints
     */
    public List<URI> getAvailableReadEndpoints() {
        return this.locationInfo.availableReadEndpointByLocation.values().stream().collect(Collectors.toList());
    }

    /***
     * Get the list of available write endpoints.
     * The list will not be filtered by the preferred region list.
     *
     * This method is ONLY used for fault injection.
     * @return all account-level write endpoints
     */
    public List<URI> getAvailableWriteEndpoints() {
        return this.locationInfo.availableWriteEndpointByLocation.values().stream().collect(Collectors.toList());
    }

    /**
     * Gets the preferred regions that are effectively in use when the client configured none.
     * Populated by {@code updateLocationCache}; see the note there about when it is set.
     *
     * @return the effective preferred locations (may be empty)
     */
    public List<String> getEffectivePreferredLocations() {
        return this.locationInfo.effectivePreferredLocations;
    }

    /**
     * Marks the given endpoint unavailable for read operations.
     */
    public void markEndpointUnavailableForRead(URI endpoint) {
        this.markEndpointUnavailable(endpoint, OperationType.Read);
    }

    /**
     * Marks the given endpoint unavailable for write operations.
     */
    public void markEndpointUnavailableForWrite(URI endpoint) {
        this.markEndpointUnavailable(endpoint, OperationType.Write);
    }

    /**
     * Invoked when a {@link DatabaseAccount} is read; refreshes the cached topology from the
     * account's readable/writable locations and its multi-write capability flag.
     *
     * @param databaseAccount the DatabaseAccount that was just read
     */
    public void onDatabaseAccountRead(DatabaseAccount databaseAccount) {
        this.updateLocationCache(
            databaseAccount.getWritableLocations(),
            databaseAccount.getReadableLocations(),
            null,
            BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
    }

    /**
     * Re-orders cached endpoints after the preferred-region list changes.
     */
    void onLocationPreferenceChanged(UnmodifiableList<String> preferredLocations) {
        this.updateLocationCache(
            null, null,
            preferredLocations,
            null);
    }

    /**
     * Resolves a request to a service endpoint.
     *
     * Resolution order visible below:
     * 1. A pre-resolved {@code requestContext.locationEndpointToRoute} short-circuits everything.
     * 2. When preferred locations must be ignored, or this is a write that cannot use multiple
     *    write locations: route to the first or second account-level write region (index parity
     *    of {@code locationIndexToRoute}), falling back to the default endpoint when discovery
     *    is disabled or no write locations are known.
     * 3. Otherwise: index (modulo) into the applicable read or write endpoint list, which honors
     *    preferred, excluded, and per-partition-unavailable regions.
     *
     * @param request request for which the endpoint is to be resolved; its requestContext must be non-null
     * @return resolved endpoint
     */
    public URI resolveServiceEndpoint(RxDocumentServiceRequest request) {
        Objects.requireNonNull(request.requestContext, "RxDocumentServiceRequest.requestContext is required and cannot be null.");
        if (request.requestContext.locationEndpointToRoute != null) {
            return request.requestContext.locationEndpointToRoute;
        }

        int locationIndex = Utils.getValueOrDefault(request.requestContext.locationIndexToRoute, 0);

        // Default is to honor preferred locations unless the request explicitly opts out.
        boolean usePreferredLocations = request.requestContext.usePreferredLocations != null
            ? request.requestContext.usePreferredLocations
            : true;

        if (!usePreferredLocations
            || (request.getOperationType().isWriteOperation() && !this.canUseMultipleWriteLocations(request))) {
            DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo;
            if (this.enableEndpointDiscovery && currentLocationInfo.availableWriteLocations.size() > 0) {
                // Only the first two write regions are considered (index parity): retries
                // alternate between the primary write region and its immediate fallback.
                locationIndex = Math.min(locationIndex % 2, currentLocationInfo.availableWriteLocations.size() - 1);
                String writeLocation = currentLocationInfo.availableWriteLocations.get(locationIndex);
                return currentLocationInfo.availableWriteEndpointByLocation.get(writeLocation);
            } else {
                return this.defaultEndpoint;
            }
        } else {
            UnmodifiableList<URI> endpoints =
                request.getOperationType().isWriteOperation()
                    ? this.getApplicableWriteEndpoints(request)
                    : this.getApplicableReadEndpoints(request);
            return endpoints.get(locationIndex % endpoints.size());
        }
    }

    /**
     * Convenience overload: applies the request's excluded regions and per-partition
     * unavailable regions to the write endpoint list.
     */
    public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) {
        return this.getApplicableWriteEndpoints(
            request.requestContext.getExcludeRegions(),
            request.requestContext.getUnavailableRegionsForPartition());
    }

    /**
     * Returns the ordered write endpoints with excluded regions filtered out.
     *
     * Exclusion precedence: request-level excluded regions override the client-level
     * excluded-regions supplier; per-partition unavailable regions are always appended.
     *
     * @param excludedRegionsOnRequest       regions excluded for this request (may be null/empty)
     * @param unavailableRegionsForPartition regions currently unavailable for the target partition (may be null/empty)
     * @return filtered write endpoints; unfiltered list when nothing is excluded
     */
    public UnmodifiableList<URI> getApplicableWriteEndpoints(
        List<String> excludedRegionsOnRequest,
        List<String> unavailableRegionsForPartition) {

        UnmodifiableList<URI> writeEndpoints = this.getWriteEndpoints();

        Supplier<CosmosExcludedRegions> excludedRegionsSupplier = this.connectionPolicy.getExcludedRegionsSupplier();
        List<String> effectiveExcludedRegions =
            isExcludedRegionsSupplierConfigured(excludedRegionsSupplier)
                ? new ArrayList<>(excludedRegionsSupplier.get().getExcludedRegions())
                : Collections.emptyList();

        // Fast path: nothing to exclude anywhere.
        if (!isExcludeRegionsConfigured(excludedRegionsOnRequest, effectiveExcludedRegions)
            && (unavailableRegionsForPartition == null || unavailableRegionsForPartition.isEmpty())) {
            return writeEndpoints;
        }

        // Request-level exclusions take precedence over the client-level supplier.
        if (excludedRegionsOnRequest != null && !excludedRegionsOnRequest.isEmpty()) {
            effectiveExcludedRegions = excludedRegionsOnRequest;
        }

        List<String> effectiveExcludedRegionsWithPartitionUnavailableRegions = new ArrayList<>(effectiveExcludedRegions);

        if (unavailableRegionsForPartition != null) {
            effectiveExcludedRegionsWithPartitionUnavailableRegions.addAll(unavailableRegionsForPartition);
        }

        return this.getApplicableEndpoints(
            writeEndpoints,
            this.locationInfo.regionNameByWriteEndpoint,
            this.defaultEndpoint,
            effectiveExcludedRegionsWithPartitionUnavailableRegions);
    }

    /**
     * Convenience overload: applies the request's excluded regions and per-partition
     * unavailable regions to the read endpoint list.
     */
    public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) {
        return this.getApplicableReadEndpoints(
            request.requestContext.getExcludeRegions(),
            request.requestContext.getUnavailableRegionsForPartition());
    }

    /**
     * Returns the ordered read endpoints with excluded regions filtered out.
     * Same precedence rules as {@code getApplicableWriteEndpoints}; the fallback when every
     * region is excluded is the first write endpoint.
     *
     * @param excludedRegionsOnRequest       regions excluded for this request (may be null/empty)
     * @param unavailableRegionsForPartition regions currently unavailable for the target partition (may be null/empty)
     * @return filtered read endpoints; unfiltered list when nothing is excluded
     */
    public UnmodifiableList<URI> getApplicableReadEndpoints(
        List<String> excludedRegionsOnRequest,
        List<String> unavailableRegionsForPartition) {

        UnmodifiableList<URI> readEndpoints = this.getReadEndpoints();

        Supplier<CosmosExcludedRegions> excludedRegionsSupplier = this.connectionPolicy.getExcludedRegionsSupplier();
        List<String> effectiveExcludedRegions =
            isExcludedRegionsSupplierConfigured(excludedRegionsSupplier)
                ? new ArrayList<>(excludedRegionsSupplier.get().getExcludedRegions())
                : Collections.emptyList();

        // Fast path: nothing to exclude anywhere.
        if (!isExcludeRegionsConfigured(excludedRegionsOnRequest, effectiveExcludedRegions)
            && (unavailableRegionsForPartition == null || unavailableRegionsForPartition.isEmpty())) {
            return readEndpoints;
        }

        // Request-level exclusions take precedence over the client-level supplier.
        if (excludedRegionsOnRequest != null && !excludedRegionsOnRequest.isEmpty()) {
            effectiveExcludedRegions = excludedRegionsOnRequest;
        }

        List<String> effectiveExcludedRegionsWithPartitionUnavailableRegions = new ArrayList<>(effectiveExcludedRegions);

        if (unavailableRegionsForPartition != null) {
            effectiveExcludedRegionsWithPartitionUnavailableRegions.addAll(unavailableRegionsForPartition);
        }

        return this.getApplicableEndpoints(
            readEndpoints,
            this.locationInfo.regionNameByReadEndpoint,
            this.locationInfo.writeEndpoints.get(0),
            effectiveExcludedRegionsWithPartitionUnavailableRegions);
    }

    /**
     * Filters {@code endpoints} down to those whose region is not in {@code excludeRegionList}
     * (case-insensitive). Endpoints with no known region mapping are dropped. When everything is
     * filtered out, the single {@code fallbackEndpoint} is returned so callers always get at
     * least one endpoint.
     */
    private UnmodifiableList<URI> getApplicableEndpoints(
        UnmodifiableList<URI> endpoints,
        UnmodifiableMap<URI, String> regionNameByEndpoint,
        URI fallbackEndpoint,
        List<String> excludeRegionList) {

        List<URI> applicableEndpoints = new ArrayList<>();
        for (URI endpoint : endpoints) {
            Utils.ValueHolder<String> regionName = new Utils.ValueHolder<>();
            if (Utils.tryGetValue(regionNameByEndpoint, endpoint, regionName)) {
                if (!excludeRegionList.stream().anyMatch(regionName.v::equalsIgnoreCase)) {
                    applicableEndpoints.add(endpoint);
                }
            }
        }

        if (applicableEndpoints.isEmpty()) {
            applicableEndpoints.add(fallbackEndpoint);
        }

        return new UnmodifiableList<>(applicableEndpoints);
    }

    /**
     * True when exclusions are configured at either the request or the client level.
     */
    private boolean isExcludeRegionsConfigured(List<String> excludedRegionsOnRequest, List<String> excludedRegionsOnClient) {
        boolean isExcludedRegionsConfiguredOnRequest
            = !(excludedRegionsOnRequest == null || excludedRegionsOnRequest.isEmpty());
        boolean isExcludedRegionsConfiguredOnClient
            = !(excludedRegionsOnClient == null || excludedRegionsOnClient.isEmpty());

        return isExcludedRegionsConfiguredOnRequest || isExcludedRegionsConfiguredOnClient;
    }

    /**
     * Resolves the regional endpoint for fault injection.
     *
     * @param region    region name to resolve
     * @param writeOnly when true only the write endpoint map is consulted
     * @return the regional endpoint
     * @throws IllegalArgumentException when the region has no known endpoint
     */
    public URI resolveFaultInjectionEndpoint(String region, boolean writeOnly) {
        Utils.ValueHolder<URI> endpointValueHolder = new Utils.ValueHolder<>();
        if (writeOnly) {
            Utils.tryGetValue(this.locationInfo.availableWriteEndpointByLocation, region, endpointValueHolder);
        } else {
            Utils.tryGetValue(this.locationInfo.availableReadEndpointByLocation, region, endpointValueHolder);
        }

        if (endpointValueHolder.v != null) {
            return endpointValueHolder.v;
        }

        throw new IllegalArgumentException("Can not find service endpoint for region " + region);
    }

    public URI getDefaultEndpoint() {
        return this.defaultEndpoint;
    }

    /**
     * Maps an endpoint back to its region name via the write or read endpoint map depending
     * on the operation type.
     *
     * NOTE(review): the fallback returns availableWriteLocations.get(0), which throws
     * IndexOutOfBoundsException while the list is still empty (before the first account read) —
     * confirm callers cannot reach this path that early.
     *
     * @param locationEndpoint endpoint to map
     * @param operationType    the (outer, fully-qualified) operation type of the request
     * @return lower-cased region name; falls back to the first account-level write region
     */
    public String getRegionName(URI locationEndpoint, com.azure.cosmos.implementation.OperationType operationType) {
        Utils.ValueHolder<String> regionName = new Utils.ValueHolder<>();
        if (operationType.isWriteOperation()) {
            if (Utils.tryGetValue(this.locationInfo.regionNameByWriteEndpoint, locationEndpoint, regionName)) {
                return regionName.v;
            }
        } else {
            if (Utils.tryGetValue(this.locationInfo.regionNameByReadEndpoint, locationEndpoint, regionName)) {
                return regionName.v;
            }
        }

        return this.locationInfo.availableWriteLocations.get(0).toLowerCase(Locale.ROOT);
    }

    private boolean areEqual(URI url1, URI url2) {
        return url1.equals(url2);
    }

    /**
     * Drops unavailability records whose expiration window has passed, so the corresponding
     * endpoints become eligible again on the next cache rebuild.
     */
    private void clearStaleEndpointUnavailabilityInfo() {
        if (!this.locationUnavailabilityInfoByEndpoint.isEmpty()) {
            List<URI> unavailableEndpoints = new ArrayList<>(this.locationUnavailabilityInfoByEndpoint.keySet());

            for (URI unavailableEndpoint : unavailableEndpoints) {
                Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>();
                Utils.ValueHolder<LocationUnavailabilityInfo> removedHolder = new Utils.ValueHolder<>();

                if (Utils.tryGetValue(this.locationUnavailabilityInfoByEndpoint, unavailableEndpoint, unavailabilityInfoHolder)
                    && durationPassed(Instant.now(), unavailabilityInfoHolder.v.lastUnavailabilityCheckTimeStamp, this.unavailableLocationsExpirationTime)
                    && Utils.tryRemove(this.locationUnavailabilityInfoByEndpoint, unavailableEndpoint, removedHolder)) {
                    logger.debug(
                        "Removed endpoint [{}] unavailable for operations [{}] from unavailableEndpoints",
                        unavailableEndpoint,
                        unavailabilityInfoHolder.v.unavailableOperations);
                }
            }
        }
    }

    /**
     * True when {@code endpoint} is currently recorded unavailable for the given operation and
     * the record has not yet expired. Expired records are treated as available (they are purged
     * separately by {@code clearStaleEndpointUnavailabilityInfo}).
     */
    private boolean isEndpointUnavailable(URI endpoint, OperationType expectedAvailableOperations) {
        Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>();

        if (expectedAvailableOperations == OperationType.None
            || !Utils.tryGetValue(this.locationUnavailabilityInfoByEndpoint, endpoint, unavailabilityInfoHolder)
            || !unavailabilityInfoHolder.v.unavailableOperations.supports(expectedAvailableOperations)) {
            return false;
        } else {
            if (durationPassed(Instant.now(), unavailabilityInfoHolder.v.lastUnavailabilityCheckTimeStamp, this.unavailableLocationsExpirationTime)) {
                return false;
            } else {
                logger.debug(
                    "Endpoint [{}] unavailable for operations [{}] present in unavailableEndpoints",
                    endpoint,
                    unavailabilityInfoHolder.v.unavailableOperations);
                // Unexpired entry that covers the requested operation => endpoint is unavailable.
                return true;
            }
        }
    }

    /**
     * True when at least one of {@code endpoints} is not currently marked unavailable for the
     * given operation.
     */
    private boolean anyEndpointsAvailable(List<URI> endpoints, OperationType expectedAvailableOperations) {
        // NOTE(review): this holder is never used in this method — candidate for removal.
        Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>();
        boolean anyEndpointsAvailable = false;
        for (URI endpoint : endpoints) {
            if (!isEndpointUnavailable(endpoint, expectedAvailableOperations)) {
                anyEndpointsAvailable = true;
                break;
            }
        }
        return anyEndpointsAvailable;
    }

    /**
     * Records (or extends) an unavailability entry for the endpoint, combining the new operation
     * type with any previously recorded ones, then rebuilds the cached endpoint ordering.
     */
    private void markEndpointUnavailable(
        URI unavailableEndpoint,
        OperationType unavailableOperationType) {
        Instant currentTime = Instant.now();
        LocationUnavailabilityInfo updatedInfo = this.locationUnavailabilityInfoByEndpoint.compute(
            unavailableEndpoint,
            new BiFunction<URI, LocationUnavailabilityInfo, LocationUnavailabilityInfo>() {
                @Override
                public LocationUnavailabilityInfo apply(URI url, LocationUnavailabilityInfo info) {
                    if (info == null) {
                        // First report for this endpoint.
                        return new LocationUnavailabilityInfo(currentTime, unavailableOperationType);
                    } else {
                        // Repeat report: refresh the timestamp and merge the operation flags.
                        info.lastUnavailabilityCheckTimeStamp = currentTime;
                        info.unavailableOperations = OperationType.combine(info.unavailableOperations, unavailableOperationType);
                        return info;
                    }
                }
            });

        this.updateLocationCache();

        logger.debug(
            "Endpoint [{}] unavailable for [{}] added/updated to unavailableEndpoints with timestamp [{}]",
            unavailableEndpoint,
            unavailableOperationType,
            updatedInfo.lastUnavailabilityCheckTimeStamp);
    }

    /** Rebuilds the endpoint ordering from the currently known topology (no new account data). */
    private void updateLocationCache() {
        updateLocationCache(null, null, null, null);
    }

    /**
     * Builds a new {@link DatabaseAccountLocationsInfo} snapshot and publishes it atomically.
     * Null parameters mean "keep the current value".
     *
     * @param writeLocations               account-level writable locations, or null
     * @param readLocations                account-level readable locations, or null
     * @param preferenceList               new preferred-region list, or null
     * @param enableMultipleWriteLocations account-level multi-write capability, or null
     */
    private void updateLocationCache(
        Iterable<DatabaseAccountLocation> writeLocations,
        Iterable<DatabaseAccountLocation> readLocations,
        UnmodifiableList<String> preferenceList,
        Boolean enableMultipleWriteLocations) {
        synchronized (this.lockObject) {
            DatabaseAccountLocationsInfo nextLocationInfo = new DatabaseAccountLocationsInfo(this.locationInfo);

            logger.debug("updating location cache ..., current readLocations [{}], current writeLocations [{}]",
                nextLocationInfo.readEndpoints, nextLocationInfo.writeEndpoints);

            if (preferenceList != null) {
                nextLocationInfo.preferredLocations = preferenceList;
            }

            if (enableMultipleWriteLocations != null) {
                this.enableMultipleWriteLocations = enableMultipleWriteLocations;
            }

            this.clearStaleEndpointUnavailabilityInfo();

            if (readLocations != null) {
                Utils.ValueHolder<UnmodifiableList<String>> out = Utils.ValueHolder.initialize(nextLocationInfo.availableReadLocations);
                Utils.ValueHolder<UnmodifiableMap<URI, String>> outReadRegionMap = Utils.ValueHolder.initialize(nextLocationInfo.regionNameByReadEndpoint);
                nextLocationInfo.availableReadEndpointByLocation = this.getEndpointByLocation(readLocations, out, outReadRegionMap);
                nextLocationInfo.availableReadLocations = out.v;
                nextLocationInfo.regionNameByReadEndpoint = outReadRegionMap.v;
            }

            if (writeLocations != null) {
                Utils.ValueHolder<UnmodifiableList<String>> out = Utils.ValueHolder.initialize(nextLocationInfo.availableWriteLocations);
                Utils.ValueHolder<UnmodifiableMap<URI, String>> outWriteRegionMap = Utils.ValueHolder.initialize(nextLocationInfo.regionNameByWriteEndpoint);
                nextLocationInfo.availableWriteEndpointByLocation = this.getEndpointByLocation(writeLocations, out, outWriteRegionMap);
                nextLocationInfo.availableWriteLocations = out.v;
                nextLocationInfo.regionNameByWriteEndpoint = outWriteRegionMap.v;
            }

            // Order write endpoints first; the first write endpoint then serves as the read fallback.
            nextLocationInfo.writeEndpoints = this.getPreferredAvailableEndpoints(nextLocationInfo.availableWriteEndpointByLocation, nextLocationInfo.availableWriteLocations, OperationType.Write, this.defaultEndpoint);
            nextLocationInfo.readEndpoints = this.getPreferredAvailableEndpoints(nextLocationInfo.availableReadEndpointByLocation, nextLocationInfo.availableReadLocations, OperationType.Read, nextLocationInfo.writeEndpoints.get(0));

            // When the client configured no preferred regions and the default (global) endpoint
            // does not map to a known read region, use all account-level read regions as the
            // effective preference order.
            // NOTE(review): when the default endpoint IS a known regional read endpoint, the
            // effective list is left unchanged — confirm that is the intended behavior.
            if (nextLocationInfo.preferredLocations == null || nextLocationInfo.preferredLocations.isEmpty()) {
                Utils.ValueHolder<String> regionForDefaultEndpoint = new Utils.ValueHolder<>();
                if (!Utils.tryGetValue(nextLocationInfo.regionNameByReadEndpoint, this.defaultEndpoint, regionForDefaultEndpoint)) {
                    nextLocationInfo.effectivePreferredLocations = nextLocationInfo.availableReadLocations;
                }
            }

            this.lastCacheUpdateTimestamp = Instant.now();

            logger.debug("updating location cache finished, new readLocations [{}], new writeLocations [{}]",
                nextLocationInfo.readEndpoints, nextLocationInfo.writeEndpoints);
            // Publish the new snapshot; readers pick it up by reference.
            this.locationInfo = nextLocationInfo;
        }
    }

    /**
     * Orders endpoints for the given operation: available endpoints first (in preferred-region
     * order, or account order when no preferences are set), then unavailable ones, with
     * {@code fallbackEndpoint} inserted when no endpoint is available at all.
     *
     * In the no-preference branch, encountering the default endpoint resets the list so only the
     * fallback is used up to that point; the unavailable endpoints are still appended after it.
     */
    private UnmodifiableList<URI> getPreferredAvailableEndpoints(UnmodifiableMap<String, URI> endpointsByLocation,
                                                                 UnmodifiableList<String> orderedLocations,
                                                                 OperationType expectedAvailableOperation,
                                                                 URI fallbackEndpoint) {
        List<URI> endpoints = new ArrayList<>();
        DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo;

        if (this.enableEndpointDiscovery) {
            if (this.canUseMultipleWriteLocations() || expectedAvailableOperation.supports(OperationType.Read)) {
                List<URI> unavailableEndpoints = new ArrayList<>();

                if (currentLocationInfo.preferredLocations != null && !currentLocationInfo.preferredLocations.isEmpty()) {
                    // Walk the client's preferred regions, partitioning into available/unavailable.
                    for (String location : currentLocationInfo.preferredLocations) {
                        Utils.ValueHolder<URI> endpoint = new Utils.ValueHolder<>();
                        if (Utils.tryGetValue(endpointsByLocation, location, endpoint)) {
                            if (this.isEndpointUnavailable(endpoint.v, expectedAvailableOperation)) {
                                unavailableEndpoints.add(endpoint.v);
                            } else {
                                endpoints.add(endpoint.v);
                            }
                        }
                    }
                } else {
                    // No preferences configured: use the account's own region order.
                    for (String location : orderedLocations) {
                        Utils.ValueHolder<URI> endpoint = Utils.ValueHolder.initialize(null);
                        if (Utils.tryGetValue(endpointsByLocation, location, endpoint)) {

                            // Hitting the default (global) endpoint: discard what was collected
                            // so far and stop — the fallback added below takes over.
                            if (this.defaultEndpoint.equals(endpoint.v)) {
                                endpoints = new ArrayList<>();
                                break;
                            }

                            if (this.isEndpointUnavailable(endpoint.v, expectedAvailableOperation)) {
                                unavailableEndpoints.add(endpoint.v);
                            } else {
                                endpoints.add(endpoint.v);
                            }
                        }
                    }
                }

                if (endpoints.isEmpty()) {
                    endpoints.add(fallbackEndpoint);
                }

                // Unavailable endpoints go last so they are only used as a final resort.
                endpoints.addAll(unavailableEndpoints);
            } else {
                // Single-write-location writes: plain account order, no availability partitioning.
                for (String location : orderedLocations) {
                    Utils.ValueHolder<URI> endpoint = Utils.ValueHolder.initialize(null);
                    if (!Strings.isNullOrEmpty(location) && Utils.tryGetValue(endpointsByLocation, location, endpoint)) {
                        endpoints.add(endpoint.v);
                    }
                }
            }
        }

        if (endpoints.isEmpty()) {
            endpoints.add(fallbackEndpoint);
        }

        return new UnmodifiableList<URI>(endpoints);
    }

    /**
     * Parses account locations into (region -> endpoint) and (endpoint -> region) maps plus the
     * account-order region list. Region names and endpoint URIs are lower-cased; entries with an
     * empty name or malformed endpoint are skipped with a warning.
     *
     * @param locations        account locations to parse
     * @param orderedLocations out: region names in account order (original casing)
     * @param regionMap        out: endpoint -> lower-cased region name
     * @return region name -> endpoint (case-insensitive keys)
     */
    private UnmodifiableMap<String, URI> getEndpointByLocation(Iterable<DatabaseAccountLocation> locations,
                                                               Utils.ValueHolder<UnmodifiableList<String>> orderedLocations,
                                                               Utils.ValueHolder<UnmodifiableMap<URI, String>> regionMap) {
        Map<String, URI> endpointsByLocation = new CaseInsensitiveMap<>();
        Map<URI, String> regionByEndpoint = new CaseInsensitiveMap<>();
        List<String> parsedLocations = new ArrayList<>();

        for (DatabaseAccountLocation location : locations) {
            if (!Strings.isNullOrEmpty(location.getName())) {
                try {
                    URI endpoint = new URI(location.getEndpoint().toLowerCase(Locale.ROOT));
                    endpointsByLocation.put(location.getName().toLowerCase(Locale.ROOT), endpoint);
                    regionByEndpoint.put(endpoint, location.getName().toLowerCase(Locale.ROOT));
                    parsedLocations.add(location.getName());
                } catch (Exception e) {
                    logger.warn("GetAvailableEndpointsByLocation() - skipping add for location = [{}] as it is location name is either empty or endpoint is malformed [{}]", location.getName(), location.getEndpoint());
                }
            }
        }

        orderedLocations.v = new UnmodifiableList<String>(parsedLocations);
        regionMap.v = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(regionByEndpoint);
        return (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(endpointsByLocation);
    }

    /**
     * True only when multi-write is enabled both by the client policy and by the account.
     */
    public boolean canUseMultipleWriteLocations() {
        return this.useMultipleWriteLocations && this.enableMultipleWriteLocations;
    }

    /**
     * Multi-write applies only to document operations and stored-procedure execution.
     */
    public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) {
        return this.canUseMultipleWriteLocations() &&
            (request.getResourceType() == ResourceType.Document ||
                (request.getResourceType() == ResourceType.StoredProcedure &&
                    request.getOperationType() == com.azure.cosmos.implementation.OperationType.ExecuteJavaScript));
    }

    /** Mutable record of when an endpoint was last reported unavailable and for which operations. */
    private static class LocationUnavailabilityInfo {
        LocationUnavailabilityInfo(Instant instant, OperationType type) {
            this.lastUnavailabilityCheckTimeStamp = instant;
            this.unavailableOperations = type;
        }

        public Instant lastUnavailabilityCheckTimeStamp;
        public OperationType unavailableOperations;
    }

    /** Bit-flag operation categories used for unavailability tracking (Read|Write). */
    private enum OperationType {
        None(0x0),
        Read(0x1),
        Write(0x2),
        ReadAndWrite(0x3);

        private final int flag;

        public boolean hasReadFlag() {
            return (flag & Read.flag) != 0;
        }

        public boolean hasWriteFlag() {
            return (flag & Write.flag) != 0;
        }

        /** Bitwise OR of the two operation types. */
        public static OperationType combine(OperationType t1, OperationType t2) {
            switch (t1.flag | t2.flag) {
                case 0x0:
                    return None;
                case 0x1:
                    return Read;
                case 0x2:
                    return Write;
                default:
                    return ReadAndWrite;
            }
        }

        /** True when this value includes the given operation's flag. */
        public boolean supports(OperationType type) {
            return (flag & type.flag) != 0;
        }

        OperationType(int flag) {
            this.flag = flag;
        }
    }

    /** True when {@code duration} has fully elapsed between {@code start} and {@code end}. */
    private boolean durationPassed(Instant end, Instant start, Duration duration) {
        return end.minus(duration).isAfter(start);
    }

    private boolean unavailableLocationsExpirationTimePassed() {
        return durationPassed(Instant.now(), this.lastCacheUpdateTimestamp, this.unavailableLocationsExpirationTime);
    }

    /** True when a supplier is configured and currently yields a non-null value. */
    private static boolean isExcludedRegionsSupplierConfigured(Supplier<CosmosExcludedRegions> excludedRegionsSupplier) {
        return excludedRegionsSupplier != null && excludedRegionsSupplier.get() != null;
    }

    /**
     * Immutable snapshot of the account topology. Instances are built privately and published by
     * assignment to {@code locationInfo}; the copy constructor shares the (unmodifiable)
     * collections of the previous snapshot.
     */
    static class DatabaseAccountLocationsInfo {
        // Client-configured preferred regions (lower-cased).
        private UnmodifiableList<String> preferredLocations;
        // Derived preference order used when preferredLocations is empty; see updateLocationCache.
        private UnmodifiableList<String> effectivePreferredLocations;
        // Account-level regions in account order.
        private UnmodifiableList<String> availableWriteLocations;
        private UnmodifiableList<String> availableReadLocations;
        // region name -> endpoint (case-insensitive).
        private UnmodifiableMap<String, URI> availableWriteEndpointByLocation;
        private UnmodifiableMap<String, URI> availableReadEndpointByLocation;
        // endpoint -> region name.
        private UnmodifiableMap<URI, String> regionNameByWriteEndpoint;
        private UnmodifiableMap<URI, String> regionNameByReadEndpoint;
        // Endpoints ordered by preference/availability; both seeded with the default endpoint.
        private UnmodifiableList<URI> writeEndpoints;
        private UnmodifiableList<URI> readEndpoints;

        public DatabaseAccountLocationsInfo(List<String> preferredLocations,
                                            URI defaultEndpoint) {
            this.preferredLocations = new UnmodifiableList<>(preferredLocations.stream().map(loc -> loc.toLowerCase(Locale.ROOT)).collect(Collectors.toList()));
            this.effectivePreferredLocations = new UnmodifiableList<>(Collections.emptyList());
            this.availableWriteEndpointByLocation
                = (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(new CaseInsensitiveMap<>());
            this.availableReadEndpointByLocation
                = (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(new CaseInsensitiveMap<>());
            this.regionNameByWriteEndpoint
                = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(new CaseInsensitiveMap<>());
            this.regionNameByReadEndpoint
                = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(new CaseInsensitiveMap<>());
            this.availableReadLocations = new UnmodifiableList<>(Collections.emptyList());
            this.availableWriteLocations = new UnmodifiableList<>(Collections.emptyList());
            this.readEndpoints = new UnmodifiableList<>(Collections.singletonList(defaultEndpoint));
            this.writeEndpoints = new UnmodifiableList<>(Collections.singletonList(defaultEndpoint));
        }

        /** Shallow copy; the shared collections are unmodifiable so sharing is safe. */
        public DatabaseAccountLocationsInfo(DatabaseAccountLocationsInfo other) {
            this.preferredLocations = other.preferredLocations;
            this.effectivePreferredLocations = other.effectivePreferredLocations;
            this.availableWriteLocations = other.availableWriteLocations;
            this.availableReadLocations = other.availableReadLocations;
            this.availableWriteEndpointByLocation = other.availableWriteEndpointByLocation;
            this.regionNameByWriteEndpoint = other.regionNameByWriteEndpoint;
            this.regionNameByReadEndpoint = other.regionNameByReadEndpoint;
            this.availableReadEndpointByLocation = other.availableReadEndpointByLocation;
            this.writeEndpoints = other.writeEndpoints;
            this.readEndpoints = other.readEndpoints;
        }
    }
}
Or maybe we need to keep preferredRegions as-is and instead add a separate effectivePreferredRegions field to locationInfo — which approach is right depends on how preferredRegions is currently used.
/**
 * Decides whether the cached endpoint topology should be refreshed (forcing a DatabaseAccount
 * re-read), and reports via {@code canRefreshInBackground} whether the refresh can happen in the
 * background (i.e. at least one endpoint of the affected kind is still available).
 *
 * When the client configured no preferred regions, the region of the first account-level
 * read/write endpoint is used as the "effective" most-preferred region for the corresponding
 * checks.
 *
 * Refresh is triggered when (in order): the first read endpoint is marked unavailable; the most
 * preferred read region's endpoint is not first (or unknown); the single-write first endpoint is
 * unavailable; or (multi-write) the most preferred write region's endpoint is not first (or
 * unknown). With no preferences, the first account-level region is also cross-checked.
 *
 * @param canRefreshInBackground out-parameter; set false when no alternative endpoint is available
 * @return true when a refresh should be performed
 */
public boolean shouldRefreshEndpoints(Utils.ValueHolder<Boolean> canRefreshInBackground) {
    canRefreshInBackground.v = true;
    DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo;
    String mostPreferredLocation = Utils.firstOrDefault(currentLocationInfo.preferredLocations);

    String mostPreferredEffectiveReadLocation = StringUtils.EMPTY;
    String mostPreferredEffectiveWriteLocation = StringUtils.EMPTY;

    // No client-configured preference: derive effective preferences from the regions of the
    // first read/write endpoints currently in use.
    if (StringUtils.isEmpty(mostPreferredLocation)) {

        URI firstAccountLevelReadEndpoint = Utils.firstOrDefault(currentLocationInfo.readEndpoints);

        if (currentLocationInfo.regionNameByReadEndpoint != null) {
            mostPreferredEffectiveReadLocation
                = currentLocationInfo.regionNameByReadEndpoint.getOrDefault(firstAccountLevelReadEndpoint, StringUtils.EMPTY);
        }

        URI firstAccountLevelWriteEndpoint = Utils.firstOrDefault(currentLocationInfo.writeEndpoints);

        if (currentLocationInfo.regionNameByWriteEndpoint != null) {
            mostPreferredEffectiveWriteLocation
                = currentLocationInfo.regionNameByWriteEndpoint.getOrDefault(firstAccountLevelWriteEndpoint, StringUtils.EMPTY);
        }
    }

    String mostPreferredReadLocationToUse
        = !Strings.isNullOrEmpty(mostPreferredLocation) ? mostPreferredLocation : mostPreferredEffectiveReadLocation;
    String mostPreferredWriteLocationToUse
        = !Strings.isNullOrEmpty(mostPreferredLocation) ? mostPreferredLocation : mostPreferredEffectiveWriteLocation;

    if (this.enableEndpointDiscovery) {
        // Refresh while the client wants multi-write but the account has not (yet) enabled it.
        boolean shouldRefresh = this.useMultipleWriteLocations && !this.enableMultipleWriteLocations;

        List<URI> readLocationEndpoints = currentLocationInfo.readEndpoints;

        // Check 1: first read endpoint marked unavailable.
        if (this.isEndpointUnavailable(readLocationEndpoints.get(0), OperationType.Read)) {
            canRefreshInBackground.v = anyEndpointsAvailable(readLocationEndpoints, OperationType.Read);
            logger.debug("shouldRefreshEndpoints = true, since the first read endpoint " +
                    "[{}] is not available for read. canRefreshInBackground = [{}]",
                readLocationEndpoints.get(0),
                canRefreshInBackground.v);

            return true;
        }

        // Check 2: most preferred read region must own the first read endpoint.
        if (!Strings.isNullOrEmpty(mostPreferredReadLocationToUse)) {

            Utils.ValueHolder<URI> mostPreferredReadEndpointHolder = new Utils.ValueHolder<>();
            Utils.ValueHolder<URI> firstAccountLevelReadEndpointHolder = new Utils.ValueHolder<>();

            logger.debug("getReadEndpoints [{}]", readLocationEndpoints);

            if (Utils.tryGetValue(currentLocationInfo.availableReadEndpointByLocation, mostPreferredReadLocationToUse, mostPreferredReadEndpointHolder)) {
                logger.debug("most preferred is [{}], most preferred available is [{}]",
                    mostPreferredReadLocationToUse,
                    mostPreferredReadEndpointHolder.v);

                if (!areEqual(mostPreferredReadEndpointHolder.v, readLocationEndpoints.get(0))) {
                    logger.debug("shouldRefreshEndpoints = true, most preferred location [{}]" +
                        " is not available for read.", mostPreferredReadLocationToUse);
                    return true;
                } else if (currentLocationInfo.preferredLocations == null || currentLocationInfo.preferredLocations.isEmpty()) {
                    // No preferences: also cross-check against the first account-level read region.
                    List<String> accountLevelReadRegions = currentLocationInfo.availableReadLocations;
                    String firstAccountLevelReadRegion = Utils.firstOrDefault(accountLevelReadRegions);

                    if (Utils.tryGetValue(currentLocationInfo.availableReadEndpointByLocation, firstAccountLevelReadRegion, firstAccountLevelReadEndpointHolder)) {
                        if (!areEqual(mostPreferredReadEndpointHolder.v, firstAccountLevelReadEndpointHolder.v)) {
                            logger.debug("shouldRefreshEndpoints = true, preferredRegions on client is empty and first account-level read region [{}]" +
                                " is not available for read.", firstAccountLevelReadRegion);
                            return true;
                        }
                    }
                }

                logger.debug("most preferred is [{}], and most preferred available [{}] are the same",
                    mostPreferredLocation,
                    mostPreferredReadEndpointHolder.v);
            } else {
                logger.debug("shouldRefreshEndpoints = true, most preferred location [{}] " +
                    "is not in available read locations.", mostPreferredLocation);
                return true;
            }
        }

        Utils.ValueHolder<URI> mostPreferredWriteEndpointHolder = new Utils.ValueHolder<>();
        List<URI> writeLocationEndpoints = currentLocationInfo.writeEndpoints;
        logger.debug("getWriteEndpoints [{}]", writeLocationEndpoints);

        if (!this.canUseMultipleWriteLocations()) {
            // Check 3 (single write location): first write endpoint must be available.
            if (this.isEndpointUnavailable(writeLocationEndpoints.get(0), OperationType.Write)) {
                canRefreshInBackground.v = anyEndpointsAvailable(writeLocationEndpoints, OperationType.Write);
                logger.debug("shouldRefreshEndpoints = true, most preferred location " +
                        "[{}] endpoint [{}] is not available for write. canRefreshInBackground = [{}]",
                    mostPreferredLocation,
                    writeLocationEndpoints.get(0),
                    canRefreshInBackground.v);

                return true;
            } else {
                logger.debug("shouldRefreshEndpoints: false, [{}] is available for Write", writeLocationEndpoints.get(0));
                return shouldRefresh;
            }
        } else if (!Strings.isNullOrEmpty(mostPreferredWriteLocationToUse)) {
            // Check 4 (multi-write): most preferred write region must own the first write endpoint.
            if (Utils.tryGetValue(currentLocationInfo.availableWriteEndpointByLocation, mostPreferredWriteLocationToUse, mostPreferredWriteEndpointHolder)) {
                shouldRefresh = !areEqual(mostPreferredWriteEndpointHolder.v, writeLocationEndpoints.get(0));

                if (currentLocationInfo.preferredLocations == null || currentLocationInfo.preferredLocations.isEmpty()) {
                    // No preferences: also cross-check against the first account-level write region.
                    List<String> accountLevelWriteRegions = currentLocationInfo.availableWriteLocations;
                    String firstAccountLevelWriteRegion = Utils.firstOrDefault(accountLevelWriteRegions);
                    Utils.ValueHolder<URI> firstAccountLevelWriteEndpointHolder = new Utils.ValueHolder<>();

                    if (Utils.tryGetValue(currentLocationInfo.availableWriteEndpointByLocation, firstAccountLevelWriteRegion, firstAccountLevelWriteEndpointHolder)) {
                        if (!areEqual(mostPreferredWriteEndpointHolder.v, firstAccountLevelWriteEndpointHolder.v)) {
                            logger.debug("shouldRefreshEndpoints = true, preferredRegions on client is empty and first account-level write region [{}]" +
                                " is not available for write.", firstAccountLevelWriteRegion);
                            shouldRefresh = true;
                        }
                    }
                }

                if (shouldRefresh) {
                    logger.debug("shouldRefreshEndpoints: true, write endpoint [{}] is not the same as most preferred [{}]",
                        writeLocationEndpoints.get(0), mostPreferredWriteEndpointHolder.v);
                } else {
                    logger.debug("shouldRefreshEndpoints: false, write endpoint [{}] is the same as most preferred [{}]",
                        writeLocationEndpoints.get(0), mostPreferredWriteEndpointHolder.v);
                }

                return shouldRefresh;
            } else {
                logger.debug("shouldRefreshEndpoints = true, most preferred location [{}] is not in available write locations",
                    mostPreferredLocation);
                return true;
            }
        } else {
            logger.debug("shouldRefreshEndpoints: false, mostPreferredLocation [{}] is empty", mostPreferredLocation);
            return shouldRefresh;
        }
    } else {
        logger.debug("shouldRefreshEndpoints: false, endpoint discovery not enabled");
        return false;
    }
}
// Effective read preference derived from the first account-level read endpoint; starts empty
// and — presumably, from the surrounding discussion — is filled in only when the client
// configured no preferred regions. TODO confirm against the full method.
String mostPreferredEffectiveReadLocation = StringUtils.EMPTY;
/**
 * Decides whether the cached read/write endpoint lists should be refreshed from the
 * database account, and whether that refresh can happen in the background.
 *
 * Returns {@code true} when an immediate refresh is needed (first read/write endpoint
 * marked unavailable, or the most preferred region is not the currently routed one /
 * not in the available set). Otherwise returns {@code shouldRefresh}, which is seeded
 * to {@code true} only when the client asked for multi-write but the account has not
 * (yet) enabled it.
 *
 * @param canRefreshInBackground out-parameter; set to {@code false} only when NO
 *        endpoint in the relevant list is available, i.e. the caller should block on
 *        the refresh instead of doing it in the background.
 * @return whether the endpoint cache should be refreshed.
 */
public boolean shouldRefreshEndpoints(Utils.ValueHolder<Boolean> canRefreshInBackground) {
    // Default: a refresh (if any) may run in the background.
    canRefreshInBackground.v = true;
    // Snapshot the locations info once; this.locationInfo may be swapped concurrently.
    DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo;
    String mostPreferredLocation = Utils.firstOrDefault(currentLocationInfo.preferredLocations);
    if (StringUtils.isEmpty(mostPreferredLocation)) {
        // No explicit preferred regions on the client — fall back to the effective
        // (account-derived) preferred-location list.
        mostPreferredLocation = Utils.firstOrDefault(currentLocationInfo.effectivePreferredLocations);
    }
    if (this.enableEndpointDiscovery) {
        // Refresh is warranted when the client wants multi-write but the account
        // has not reported multi-write as enabled yet.
        boolean shouldRefresh = this.useMultipleWriteLocations && !this.enableMultipleWriteLocations;

        List<URI> readLocationEndpoints = currentLocationInfo.readEndpoints;

        // Case 1: the endpoint currently routed to for reads is marked unavailable.
        if (this.isEndpointUnavailable(readLocationEndpoints.get(0), OperationType.Read)) {
            // Background refresh is only possible if some other read endpoint is usable.
            canRefreshInBackground.v = anyEndpointsAvailable(readLocationEndpoints,OperationType.Read);
            logger.debug("shouldRefreshEndpoints = true, since the first read endpoint " +
                    "[{}] is not available for read. canRefreshInBackground = [{}]",
                readLocationEndpoints.get(0),
                canRefreshInBackground.v);
            return true;
        }

        // Case 2: reads are routed somewhere other than the most preferred region.
        if (!Strings.isNullOrEmpty(mostPreferredLocation)) {
            Utils.ValueHolder<URI> mostPreferredReadEndpointHolder = new Utils.ValueHolder<>();
            logger.debug("getReadEndpoints [{}]", readLocationEndpoints);
            if (Utils.tryGetValue(currentLocationInfo.availableReadEndpointByLocation,
                    mostPreferredLocation, mostPreferredReadEndpointHolder)) {
                logger.debug("most preferred is [{}], most preferred available is [{}]",
                    mostPreferredLocation, mostPreferredReadEndpointHolder.v);
                if (!areEqual(mostPreferredReadEndpointHolder.v, readLocationEndpoints.get(0))) {
                    logger.debug("shouldRefreshEndpoints = true, most preferred location [{}]" +
                        " is not available for read.", mostPreferredLocation);
                    return true;
                }
                logger.debug("most preferred is [{}], and most preferred available [{}] are the same",
                    mostPreferredLocation, mostPreferredReadEndpointHolder.v);
            } else {
                // Most preferred region is not in the account's available read regions.
                logger.debug("shouldRefreshEndpoints = true, most preferred location [{}] " +
                    "is not in available read locations.", mostPreferredLocation);
                return true;
            }
        }

        Utils.ValueHolder<URI> mostPreferredWriteEndpointHolder = new Utils.ValueHolder<>();
        List<URI> writeLocationEndpoints = currentLocationInfo.writeEndpoints;
        logger.debug("getWriteEndpoints [{}]", writeLocationEndpoints);

        if (!this.canUseMultipleWriteLocations()) {
            // Single-write account: only the first write endpoint matters.
            if (this.isEndpointUnavailable(writeLocationEndpoints.get(0), OperationType.Write)) {
                canRefreshInBackground.v = anyEndpointsAvailable(writeLocationEndpoints,OperationType.Write);
                logger.debug("shouldRefreshEndpoints = true, most preferred location " +
                        "[{}] endpoint [{}] is not available for write. canRefreshInBackground = [{}]",
                    mostPreferredLocation,
                    writeLocationEndpoints.get(0),
                    canRefreshInBackground.v);
                return true;
            } else {
                logger.debug("shouldRefreshEndpoints: false, [{}] is available for Write",
                    writeLocationEndpoints.get(0));
                return shouldRefresh;
            }
        } else if (!Strings.isNullOrEmpty(mostPreferredLocation)) {
            // Multi-write account: refresh if writes are not routed to the most
            // preferred region's endpoint.
            if (Utils.tryGetValue(currentLocationInfo.availableWriteEndpointByLocation,
                    mostPreferredLocation, mostPreferredWriteEndpointHolder)) {
                shouldRefresh = ! areEqual(mostPreferredWriteEndpointHolder.v,writeLocationEndpoints.get(0));
                if (shouldRefresh) {
                    logger.debug("shouldRefreshEndpoints: true, write endpoint [{}] is not the same as most preferred [{}]",
                        writeLocationEndpoints.get(0), mostPreferredWriteEndpointHolder.v);
                } else {
                    logger.debug("shouldRefreshEndpoints: false, write endpoint [{}] is the same as most preferred [{}]",
                        writeLocationEndpoints.get(0), mostPreferredWriteEndpointHolder.v);
                }
                return shouldRefresh;
            } else {
                logger.debug("shouldRefreshEndpoints = true, most preferred location [{}] is not in available write locations",
                    mostPreferredLocation);
                return true;
            }
        } else {
            logger.debug("shouldRefreshEndpoints: false, mostPreferredLocation [{}] is empty",
                mostPreferredLocation);
            return shouldRefresh;
        }
    } else {
        // Endpoint discovery disabled: nothing to refresh.
        logger.debug("shouldRefreshEndpoints: false, endpoint discovery not enabled");
        return false;
    }
}
/**
 * Caches the database account's regional endpoints and routes requests to them,
 * honoring the client's preferred-region list, per-endpoint availability marks,
 * and excluded regions.
 *
 * Thread-safety: {@link #updateLocationCache} rebuilds a fresh
 * {@link DatabaseAccountLocationsInfo} under {@code lockObject} and publishes it by
 * swapping the {@code locationInfo} reference; readers take an unsynchronized
 * snapshot of that reference.
 */
class LocationCache {
    private final static Logger logger = LoggerFactory.getLogger(LocationCache.class);

    private final boolean enableEndpointDiscovery;
    private final URI defaultEndpoint;
    private final boolean useMultipleWriteLocations;        // client-side opt-in
    private final Object lockObject;
    // How long an endpoint stays marked unavailable before the mark expires.
    private final Duration unavailableLocationsExpirationTime;
    private final ConcurrentHashMap<URI, LocationUnavailabilityInfo> locationUnavailabilityInfoByEndpoint;
    private final ConnectionPolicy connectionPolicy;

    // Copy-on-write snapshot of the account's regions/endpoints.
    private DatabaseAccountLocationsInfo locationInfo;
    private Instant lastCacheUpdateTimestamp;
    private boolean enableMultipleWriteLocations;           // account-side capability

    /**
     * @param connectionPolicy source of preferred regions, discovery/multi-write flags
     *                         and the excluded-regions supplier.
     * @param defaultEndpoint  global/account endpoint used as the routing fallback.
     * @param configs          supplies the unavailable-endpoint expiration time.
     */
    public LocationCache(
        ConnectionPolicy connectionPolicy,
        URI defaultEndpoint,
        Configs configs) {
        List<String> preferredLocations =
            new ArrayList<>(connectionPolicy.getPreferredRegions() != null ?
                connectionPolicy.getPreferredRegions() :
                Collections.emptyList()
            );

        this.locationInfo = new DatabaseAccountLocationsInfo(preferredLocations, defaultEndpoint);
        this.defaultEndpoint = defaultEndpoint;
        this.enableEndpointDiscovery = connectionPolicy.isEndpointDiscoveryEnabled();
        this.useMultipleWriteLocations = connectionPolicy.isMultipleWriteRegionsEnabled();

        this.lockObject = new Object();
        this.locationUnavailabilityInfoByEndpoint = new ConcurrentHashMap<>();
        this.lastCacheUpdateTimestamp = Instant.MIN;
        this.enableMultipleWriteLocations = false;
        this.unavailableLocationsExpirationTime =
            Duration.ofSeconds(configs.getUnavailableLocationsExpirationTimeInSeconds());
        this.connectionPolicy = connectionPolicy;
    }

    /**
     * Gets the list of read endpoints ordered by
     * 1. preferred location, 2. endpoint availability.
     * Triggers a cache rebuild first if any unavailability marks may have expired.
     *
     * @return ordered read endpoints.
     */
    public UnmodifiableList<URI> getReadEndpoints() {
        if (this.locationUnavailabilityInfoByEndpoint.size() > 0
            && unavailableLocationsExpirationTimePassed()) {
            this.updateLocationCache();
        }

        return this.locationInfo.readEndpoints;
    }

    /**
     * Gets the list of write endpoints ordered by
     * 1. preferred location, 2. endpoint availability.
     * Triggers a cache rebuild first if any unavailability marks may have expired.
     *
     * @return ordered write endpoints.
     */
    public UnmodifiableList<URI> getWriteEndpoints() {
        if (this.locationUnavailabilityInfoByEndpoint.size() > 0
            && unavailableLocationsExpirationTimePassed()) {
            this.updateLocationCache();
        }

        return this.locationInfo.writeEndpoints;
    }

    /***
     * Gets the list of available read endpoints.
     * The list is NOT filtered by the preferred-region list.
     *
     * This method is ONLY used for fault injection.
     * @return all available read endpoints.
     */
    public List<URI> getAvailableReadEndpoints() {
        return this.locationInfo.availableReadEndpointByLocation.values().stream().collect(Collectors.toList());
    }

    /***
     * Gets the list of available write endpoints.
     * The list is NOT filtered by the preferred-region list.
     *
     * This method is ONLY used for fault injection.
     * @return all available write endpoints.
     */
    public List<URI> getAvailableWriteEndpoints() {
        return this.locationInfo.availableWriteEndpointByLocation.values().stream().collect(Collectors.toList());
    }

    /** Marks the given endpoint unavailable for read operations. */
    public void markEndpointUnavailableForRead(URI endpoint) {
        this.markEndpointUnavailable(endpoint, OperationType.Read);
    }

    /** Marks the given endpoint unavailable for write operations. */
    public void markEndpointUnavailableForWrite(URI endpoint) {
        this.markEndpointUnavailable(endpoint, OperationType.Write);
    }

    /**
     * Invoked when the {@link DatabaseAccount} is (re)read; refreshes the cached
     * regional endpoints and the account-level multi-write capability.
     *
     * @param databaseAccount the freshly read DatabaseAccount.
     */
    public void onDatabaseAccountRead(DatabaseAccount databaseAccount) {
        this.updateLocationCache(
            databaseAccount.getWritableLocations(),
            databaseAccount.getReadableLocations(),
            null,
            BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
    }

    /** Rebuilds the cache with a new preferred-region list. */
    void onLocationPreferenceChanged(UnmodifiableList<String> preferredLocations) {
        this.updateLocationCache(
            null, null , preferredLocations, null);
    }

    /**
     * Resolves a request to a service endpoint.
     *
     * 1. Write requests:
     *    (a) with multi-write usable: route to the most preferred available write
     *        endpoint (unavailable endpoints are moved to the end of the list, so a
     *        retry lands on the next preferred available one);
     *    (b) otherwise: route to the first write region (second one only via the
     *        locationIndex during manual failover), ignoring preferred regions.
     * 2. Read requests: route to the most preferred available read endpoint
     *    (automatic failover for reads).
     *
     * An endpoint pinned on the request context ({@code locationEndpointToRoute})
     * always wins.
     *
     * @param request request whose endpoint is to be resolved; requestContext must be non-null.
     * @return resolved endpoint.
     */
    public URI resolveServiceEndpoint(RxDocumentServiceRequest request) {
        Objects.requireNonNull(request.requestContext,
            "RxDocumentServiceRequest.requestContext is required and cannot be null.");
        if(request.requestContext.locationEndpointToRoute != null) {
            return request.requestContext.locationEndpointToRoute;
        }

        int locationIndex = Utils.getValueOrDefault(request.requestContext.locationIndexToRoute, 0);
        boolean usePreferredLocations = request.requestContext.usePreferredLocations != null
            ? request.requestContext.usePreferredLocations : true;

        if(!usePreferredLocations
            || (request.getOperationType().isWriteOperation() && !this.canUseMultipleWriteLocations(request))) {
            // Single-write path: alternate only between the first two write regions
            // (locationIndex % 2), e.g. during a manual failover.
            DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo;
            if(this.enableEndpointDiscovery && currentLocationInfo.availableWriteLocations.size() > 0) {
                locationIndex = Math.min(locationIndex%2, currentLocationInfo.availableWriteLocations.size()-1);
                String writeLocation = currentLocationInfo.availableWriteLocations.get(locationIndex);
                return currentLocationInfo.availableWriteEndpointByLocation.get(writeLocation);
            } else {
                return this.defaultEndpoint;
            }
        } else {
            // Preferred-location routing; locationIndex cycles through the applicable list.
            UnmodifiableList<URI> endpoints =
                request.getOperationType().isWriteOperation()
                    ? this.getApplicableWriteEndpoints(request)
                    : this.getApplicableReadEndpoints(request);
            return endpoints.get(locationIndex % endpoints.size());
        }
    }

    /** Write endpoints applicable to this request after region exclusions. */
    public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) {
        return this.getApplicableWriteEndpoints(
            request.requestContext.getExcludeRegions(),
            request.requestContext.getUnavailableRegionsForPartition());
    }

    /**
     * Write endpoints minus excluded regions (request-level exclusions override the
     * client-level supplier) and minus regions unavailable for the partition.
     * Falls back to the default endpoint when everything is excluded.
     *
     * @param excludedRegionsOnRequest      per-request excluded regions (may be null).
     * @param unavailableRegionsForPartition regions unavailable for the target partition (may be null).
     * @return filtered write endpoints.
     */
    public UnmodifiableList<URI> getApplicableWriteEndpoints(
        List<String> excludedRegionsOnRequest, List<String> unavailableRegionsForPartition) {

        UnmodifiableList<URI> writeEndpoints = this.getWriteEndpoints();
        Supplier<CosmosExcludedRegions> excludedRegionsSupplier = this.connectionPolicy.getExcludedRegionsSupplier();

        List<String> effectiveExcludedRegions = isExcludedRegionsSupplierConfigured(excludedRegionsSupplier)
            ? new ArrayList<>(excludedRegionsSupplier.get().getExcludedRegions())
            : Collections.emptyList();

        // Fast path: nothing to filter.
        if (!isExcludeRegionsConfigured(excludedRegionsOnRequest, effectiveExcludedRegions)
            && (unavailableRegionsForPartition == null || unavailableRegionsForPartition.isEmpty())) {
            return writeEndpoints;
        }

        // Request-level exclusions take precedence over client-level ones.
        if (excludedRegionsOnRequest != null && !excludedRegionsOnRequest.isEmpty()) {
            effectiveExcludedRegions = excludedRegionsOnRequest;
        }

        List<String> effectiveExcludedRegionsWithPartitionUnavailableRegions = new ArrayList<>(effectiveExcludedRegions);
        if (unavailableRegionsForPartition != null) {
            effectiveExcludedRegionsWithPartitionUnavailableRegions.addAll(unavailableRegionsForPartition);
        }

        return this.getApplicableEndpoints(
            writeEndpoints,
            this.locationInfo.regionNameByWriteEndpoint,
            this.defaultEndpoint,
            effectiveExcludedRegionsWithPartitionUnavailableRegions);
    }

    /** Read endpoints applicable to this request after region exclusions. */
    public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) {
        return this.getApplicableReadEndpoints(
            request.requestContext.getExcludeRegions(),
            request.requestContext.getUnavailableRegionsForPartition());
    }

    /**
     * Read endpoints minus excluded regions (request-level exclusions override the
     * client-level supplier) and minus regions unavailable for the partition.
     * Falls back to the first write endpoint when everything is excluded.
     *
     * @param excludedRegionsOnRequest      per-request excluded regions (may be null).
     * @param unavailableRegionsForPartition regions unavailable for the target partition (may be null).
     * @return filtered read endpoints.
     */
    public UnmodifiableList<URI> getApplicableReadEndpoints(
        List<String> excludedRegionsOnRequest, List<String> unavailableRegionsForPartition) {

        UnmodifiableList<URI> readEndpoints = this.getReadEndpoints();
        Supplier<CosmosExcludedRegions> excludedRegionsSupplier = this.connectionPolicy.getExcludedRegionsSupplier();

        List<String> effectiveExcludedRegions = isExcludedRegionsSupplierConfigured(excludedRegionsSupplier)
            ? new ArrayList<>(excludedRegionsSupplier.get().getExcludedRegions())
            : Collections.emptyList();

        // Fast path: nothing to filter.
        if (!isExcludeRegionsConfigured(excludedRegionsOnRequest, effectiveExcludedRegions)
            && (unavailableRegionsForPartition == null || unavailableRegionsForPartition.isEmpty())) {
            return readEndpoints;
        }

        // Request-level exclusions take precedence over client-level ones.
        if (excludedRegionsOnRequest != null && !excludedRegionsOnRequest.isEmpty()) {
            effectiveExcludedRegions = excludedRegionsOnRequest;
        }

        List<String> effectiveExcludedRegionsWithPartitionUnavailableRegions = new ArrayList<>(effectiveExcludedRegions);
        if (unavailableRegionsForPartition != null) {
            effectiveExcludedRegionsWithPartitionUnavailableRegions.addAll(unavailableRegionsForPartition);
        }

        return this.getApplicableEndpoints(
            readEndpoints,
            this.locationInfo.regionNameByReadEndpoint,
            this.locationInfo.writeEndpoints.get(0),
            effectiveExcludedRegionsWithPartitionUnavailableRegions);
    }

    /**
     * Keeps only endpoints whose region is not in {@code excludeRegionList}
     * (case-insensitive). Endpoints with no known region are dropped; if nothing
     * survives, the fallback endpoint is returned alone.
     */
    private UnmodifiableList<URI> getApplicableEndpoints(
        UnmodifiableList<URI> endpoints,
        UnmodifiableMap<URI, String> regionNameByEndpoint,
        URI fallbackEndpoint,
        List<String> excludeRegionList) {

        List<URI> applicableEndpoints = new ArrayList<>();
        for (URI endpoint : endpoints) {
            Utils.ValueHolder<String> regionName = new Utils.ValueHolder<>();
            if (Utils.tryGetValue(regionNameByEndpoint, endpoint, regionName)) {
                if (!excludeRegionList.stream().anyMatch(regionName.v::equalsIgnoreCase)) {
                    applicableEndpoints.add(endpoint);
                }
            }
        }

        if (applicableEndpoints.isEmpty()) {
            applicableEndpoints.add(fallbackEndpoint);
        }

        return new UnmodifiableList<>(applicableEndpoints);
    }

    /** True when either the request or the client configured a non-empty exclusion list. */
    private boolean isExcludeRegionsConfigured(List<String> excludedRegionsOnRequest, List<String> excludedRegionsOnClient) {
        boolean isExcludedRegionsConfiguredOnRequest
            = !(excludedRegionsOnRequest == null || excludedRegionsOnRequest.isEmpty());
        boolean isExcludedRegionsConfiguredOnClient
            = !(excludedRegionsOnClient == null || excludedRegionsOnClient.isEmpty());

        return isExcludedRegionsConfiguredOnRequest || isExcludedRegionsConfiguredOnClient;
    }

    /**
     * Resolves a region name to its endpoint for fault injection.
     *
     * @param region    region name (key into the available endpoint maps).
     * @param writeOnly look up in write endpoints instead of read endpoints.
     * @return the region's endpoint.
     * @throws IllegalArgumentException when the region is unknown.
     */
    public URI resolveFaultInjectionEndpoint(String region, boolean writeOnly) {
        Utils.ValueHolder<URI> endpointValueHolder = new Utils.ValueHolder<>();
        if (writeOnly) {
            Utils.tryGetValue(this.locationInfo.availableWriteEndpointByLocation, region, endpointValueHolder);
        } else {
            Utils.tryGetValue(this.locationInfo.availableReadEndpointByLocation, region, endpointValueHolder);
        }

        if (endpointValueHolder.v != null) {
            return endpointValueHolder.v;
        }

        throw new IllegalArgumentException("Can not find service endpoint for region " + region);
    }

    /** @return the account's default/global endpoint. */
    public URI getDefaultEndpoint() {
        return this.defaultEndpoint;
    }

    /**
     * Reverse-maps an endpoint to its (lower-cased) region name.
     * Falls back to the first available write region when the endpoint is unknown
     * (e.g. the default endpoint).
     */
    public String getRegionName(URI locationEndpoint, com.azure.cosmos.implementation.OperationType operationType) {
        Utils.ValueHolder<String> regionName = new Utils.ValueHolder<>();
        if (operationType.isWriteOperation()) {
            if (Utils.tryGetValue(this.locationInfo.regionNameByWriteEndpoint, locationEndpoint, regionName)) {
                return regionName.v;
            }
        } else {
            if (Utils.tryGetValue(this.locationInfo.regionNameByReadEndpoint, locationEndpoint, regionName)) {
                return regionName.v;
            }
        }

        return this.locationInfo.availableWriteLocations.get(0).toLowerCase(Locale.ROOT);
    }

    private boolean areEqual(URI url1, URI url2) {
        return url1.equals(url2);
    }

    /**
     * Drops unavailability marks whose expiration window has elapsed, so the
     * endpoints become routable again on the next cache rebuild.
     */
    private void clearStaleEndpointUnavailabilityInfo() {
        if (!this.locationUnavailabilityInfoByEndpoint.isEmpty()) {
            List<URI> unavailableEndpoints = new ArrayList<>(this.locationUnavailabilityInfoByEndpoint.keySet());

            for (URI unavailableEndpoint: unavailableEndpoints) {
                Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>();
                Utils.ValueHolder<LocationUnavailabilityInfo> removedHolder = new Utils.ValueHolder<>();

                if (Utils.tryGetValue(this.locationUnavailabilityInfoByEndpoint, unavailableEndpoint, unavailabilityInfoHolder)
                    && durationPassed(Instant.now(), unavailabilityInfoHolder.v.lastUnavailabilityCheckTimeStamp,
                        this.unavailableLocationsExpirationTime)
                    && Utils.tryRemove(this.locationUnavailabilityInfoByEndpoint, unavailableEndpoint, removedHolder)) {
                    logger.debug(
                        "Removed endpoint [{}] unavailable for operations [{}] from unavailableEndpoints",
                        unavailableEndpoint,
                        unavailabilityInfoHolder.v.unavailableOperations);
                }
            }
        }
    }

    /**
     * True when the endpoint carries a non-expired unavailability mark covering the
     * given operation kind. {@code OperationType.None} is never unavailable.
     */
    private boolean isEndpointUnavailable(URI endpoint, OperationType expectedAvailableOperations) {
        Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>();

        if (expectedAvailableOperations == OperationType.None
            || !Utils.tryGetValue(this.locationUnavailabilityInfoByEndpoint, endpoint, unavailabilityInfoHolder)
            || !unavailabilityInfoHolder.v.unavailableOperations.supports(expectedAvailableOperations)) {
            return false;
        } else {
            if (durationPassed(Instant.now(), unavailabilityInfoHolder.v.lastUnavailabilityCheckTimeStamp,
                this.unavailableLocationsExpirationTime)) {
                // Mark has expired — treat as available again.
                return false;
            } else {
                logger.debug(
                    "Endpoint [{}] unavailable for operations [{}] present in unavailableEndpoints",
                    endpoint,
                    unavailabilityInfoHolder.v.unavailableOperations);
                return true;
            }
        }
    }

    /** True when at least one endpoint in the list is not marked unavailable. */
    private boolean anyEndpointsAvailable(List<URI> endpoints, OperationType expectedAvailableOperations) {
        // NOTE(review): this holder is never used below — candidate for removal.
        Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>();
        boolean anyEndpointsAvailable = false;
        for (URI endpoint : endpoints) {
            if (!isEndpointUnavailable(endpoint, expectedAvailableOperations)) {
                anyEndpointsAvailable = true;
                break;
            }
        }
        return anyEndpointsAvailable;
    }

    /**
     * Records (or extends) an unavailability mark for the endpoint and immediately
     * rebuilds the cache so routing reflects it.
     */
    private void markEndpointUnavailable(
        URI unavailableEndpoint,
        OperationType unavailableOperationType) {
        Instant currentTime = Instant.now();
        LocationUnavailabilityInfo updatedInfo = this.locationUnavailabilityInfoByEndpoint.compute(
            unavailableEndpoint,
            new BiFunction<URI, LocationUnavailabilityInfo, LocationUnavailabilityInfo>() {
                @Override
                public LocationUnavailabilityInfo apply(URI url, LocationUnavailabilityInfo info) {

                    if (info == null) {
                        // First mark for this endpoint.
                        return new LocationUnavailabilityInfo(currentTime, unavailableOperationType);
                    } else {
                        // Refresh the timestamp and widen the operation mask.
                        info.lastUnavailabilityCheckTimeStamp = currentTime;
                        info.unavailableOperations = OperationType.combine(info.unavailableOperations, unavailableOperationType);
                        return info;
                    }
                }
            });

        this.updateLocationCache();

        logger.debug(
            "Endpoint [{}] unavailable for [{}] added/updated to unavailableEndpoints with timestamp [{}]",
            unavailableEndpoint,
            unavailableOperationType,
            updatedInfo.lastUnavailabilityCheckTimeStamp);
    }

    /** Rebuilds the cache from current state (no new account data). */
    private void updateLocationCache(){
        updateLocationCache(null, null, null, null);
    }

    /**
     * Rebuilds the copy-on-write {@link DatabaseAccountLocationsInfo} under the lock
     * and publishes it. Null arguments mean "keep the current value".
     *
     * @param writeLocations              new writable regions from the account, or null.
     * @param readLocations               new readable regions from the account, or null.
     * @param preferenceList              new preferred-region list, or null.
     * @param enableMultipleWriteLocations account multi-write capability, or null.
     */
    private void updateLocationCache(
        Iterable<DatabaseAccountLocation> writeLocations,
        Iterable<DatabaseAccountLocation> readLocations,
        UnmodifiableList<String> preferenceList,
        Boolean enableMultipleWriteLocations) {
        synchronized (this.lockObject) {
            DatabaseAccountLocationsInfo nextLocationInfo = new DatabaseAccountLocationsInfo(this.locationInfo);
            logger.debug("updating location cache ..., current readLocations [{}], current writeLocations [{}]",
                nextLocationInfo.readEndpoints, nextLocationInfo.writeEndpoints);

            if (preferenceList != null) {
                nextLocationInfo.preferredLocations = preferenceList;
            }

            if (enableMultipleWriteLocations != null) {
                this.enableMultipleWriteLocations = enableMultipleWriteLocations;
            }

            // Drop expired unavailability marks before recomputing endpoint order.
            this.clearStaleEndpointUnavailabilityInfo();

            if (readLocations != null) {
                Utils.ValueHolder<UnmodifiableList<String>> out = Utils.ValueHolder.initialize(nextLocationInfo.availableReadLocations);
                Utils.ValueHolder<UnmodifiableMap<URI, String>> outReadRegionMap = Utils.ValueHolder.initialize(nextLocationInfo.regionNameByReadEndpoint);
                nextLocationInfo.availableReadEndpointByLocation = this.getEndpointByLocation(readLocations, out, outReadRegionMap);
                nextLocationInfo.availableReadLocations = out.v;
                nextLocationInfo.regionNameByReadEndpoint = outReadRegionMap.v;
            }

            if (writeLocations != null) {
                Utils.ValueHolder<UnmodifiableList<String>> out = Utils.ValueHolder.initialize(nextLocationInfo.availableWriteLocations);
                Utils.ValueHolder<UnmodifiableMap<URI, String>> outWriteRegionMap = Utils.ValueHolder.initialize(nextLocationInfo.regionNameByWriteEndpoint);
                nextLocationInfo.availableWriteEndpointByLocation = this.getEndpointByLocation(writeLocations, out, outWriteRegionMap);
                nextLocationInfo.availableWriteLocations = out.v;
                nextLocationInfo.regionNameByWriteEndpoint = outWriteRegionMap.v;
            }

            // Write endpoints fall back to the default endpoint; read endpoints fall
            // back to the first write endpoint.
            nextLocationInfo.writeEndpoints = this.getPreferredAvailableEndpoints(nextLocationInfo.availableWriteEndpointByLocation, nextLocationInfo.availableWriteLocations, OperationType.Write, this.defaultEndpoint);
            nextLocationInfo.readEndpoints = this.getPreferredAvailableEndpoints(nextLocationInfo.availableReadEndpointByLocation, nextLocationInfo.availableReadLocations, OperationType.Read, nextLocationInfo.writeEndpoints.get(0));
            this.lastCacheUpdateTimestamp = Instant.now();

            logger.debug("updating location cache finished, new readLocations [{}], new writeLocations [{}]",
                nextLocationInfo.readEndpoints, nextLocationInfo.writeEndpoints);
            // Publish the new snapshot (single reference write).
            this.locationInfo = nextLocationInfo;
        }
    }

    /**
     * Orders endpoints for routing: available endpoints first (in preferred-region
     * order, or account order when no preferred regions are set), then unavailable
     * ones, then the fallback if nothing was available. When multi-write is not
     * usable and the operation is write-only, returns the account-ordered endpoints
     * without availability reordering.
     */
    private UnmodifiableList<URI> getPreferredAvailableEndpoints(UnmodifiableMap<String, URI> endpointsByLocation,
                                                                UnmodifiableList<String> orderedLocations,
                                                                OperationType expectedAvailableOperation,
                                                                URI fallbackEndpoint) {
        List<URI> endpoints = new ArrayList<>();
        DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo;

        if (this.enableEndpointDiscovery) {
            if (this.canUseMultipleWriteLocations() || expectedAvailableOperation.supports(OperationType.Read)) {
                List<URI> unavailableEndpoints = new ArrayList<>();

                if (currentLocationInfo.preferredLocations != null && !currentLocationInfo.preferredLocations.isEmpty()) {
                    // Preferred-region order; unavailable endpoints are appended last.
                    for (String location: currentLocationInfo.preferredLocations) {
                        Utils.ValueHolder<URI> endpoint = new Utils.ValueHolder<>();
                        if (Utils.tryGetValue(endpointsByLocation, location, endpoint)) {
                            if (this.isEndpointUnavailable(endpoint.v, expectedAvailableOperation)) {
                                unavailableEndpoints.add(endpoint.v);
                            } else {
                                endpoints.add(endpoint.v);
                            }
                        }
                    }
                } else {
                    // No preferred regions — use the account's region order.
                    for (String location : orderedLocations) {
                        Utils.ValueHolder<URI> endpoint = Utils.ValueHolder.initialize(null);
                        if (Utils.tryGetValue(endpointsByLocation, location, endpoint)) {
                            // A regional endpoint equal to the default endpoint means the
                            // regional map is not usable — reset and use the fallback.
                            if (this.defaultEndpoint.equals(endpoint.v)) {
                                endpoints = new ArrayList<>();
                                break;
                            }

                            if (this.isEndpointUnavailable(endpoint.v, expectedAvailableOperation)) {
                                unavailableEndpoints.add(endpoint.v);
                            } else {
                                endpoints.add(endpoint.v);
                            }
                        }
                    }
                }

                if (endpoints.isEmpty()) {
                    endpoints.add(fallbackEndpoint);
                }

                endpoints.addAll(unavailableEndpoints);
            } else {
                for (String location : orderedLocations) {
                    Utils.ValueHolder<URI> endpoint = Utils.ValueHolder.initialize(null);
                    if (!Strings.isNullOrEmpty(location) && Utils.tryGetValue(endpointsByLocation, location, endpoint)) {
                        endpoints.add(endpoint.v);
                    }
                }
            }
        }

        if (endpoints.isEmpty()) {
            endpoints.add(fallbackEndpoint);
        }

        return new UnmodifiableList<URI>(endpoints);
    }

    /**
     * Parses the account's region list into (lower-cased) name→endpoint and
     * endpoint→name maps, skipping entries with an empty name or malformed endpoint.
     *
     * @param locations        regions reported by the account.
     * @param orderedLocations out: region names in account order (original casing).
     * @param regionMap        out: endpoint→region-name map.
     * @return region-name→endpoint map (case-insensitive keys).
     */
    private UnmodifiableMap<String, URI> getEndpointByLocation(Iterable<DatabaseAccountLocation> locations,
                                                               Utils.ValueHolder<UnmodifiableList<String>> orderedLocations,
                                                               Utils.ValueHolder<UnmodifiableMap<URI, String>> regionMap) {
        Map<String, URI> endpointsByLocation = new CaseInsensitiveMap<>();
        Map<URI, String> regionByEndpoint = new CaseInsensitiveMap<>();
        List<String> parsedLocations = new ArrayList<>();

        for (DatabaseAccountLocation location: locations) {
            if (!Strings.isNullOrEmpty(location.getName())) {
                try {
                    URI endpoint = new URI(location.getEndpoint().toLowerCase(Locale.ROOT));
                    endpointsByLocation.put(location.getName().toLowerCase(Locale.ROOT), endpoint);
                    regionByEndpoint.put(endpoint, location.getName().toLowerCase(Locale.ROOT));
                    parsedLocations.add(location.getName());
                } catch (Exception e) {
                    logger.warn("GetAvailableEndpointsByLocation() - skipping add for location = [{}] as it is location name is either empty or endpoint is malformed [{}]",
                        location.getName(),
                        location.getEndpoint());
                }
            }
        }

        orderedLocations.v = new UnmodifiableList<String>(parsedLocations);
        regionMap.v = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(regionByEndpoint);

        return (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(endpointsByLocation);
    }

    /** True when the client requested multi-write AND the account enables it. */
    public boolean canUseMultipleWriteLocations() {
        return this.useMultipleWriteLocations && this.enableMultipleWriteLocations;
    }

    /**
     * Multi-write is only usable for document operations and stored-procedure
     * execution; all other resource types always go to the primary write region.
     */
    public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) {
        return this.canUseMultipleWriteLocations() &&
            (request.getResourceType() == ResourceType.Document ||
                (request.getResourceType() == ResourceType.StoredProcedure &&
                    request.getOperationType() == com.azure.cosmos.implementation.OperationType.ExecuteJavaScript));
    }

    /** Mutable record of when and for which operations an endpoint was marked unavailable. */
    private static class LocationUnavailabilityInfo {
        LocationUnavailabilityInfo(Instant instant, OperationType type) {
            this.lastUnavailabilityCheckTimeStamp = instant;
            this.unavailableOperations = type;
        }

        public Instant lastUnavailabilityCheckTimeStamp;
        public OperationType unavailableOperations;
    }

    /** Bit-flag operation kinds: Read = 0x1, Write = 0x2, combinable. */
    private enum OperationType {
        None(0x0),
        Read(0x1),
        Write(0x2),
        ReadAndWrite(0x3);

        private final int flag;

        public boolean hasReadFlag() {
            return (flag & Read.flag) != 0;
        }

        public boolean hasWriteFlag() {
            return (flag & Write.flag) != 0;
        }

        /** Bitwise union of two operation kinds. */
        public static OperationType combine(OperationType t1, OperationType t2) {
            switch (t1.flag | t2.flag) {
                case 0x0:
                    return None;
                case 0x1:
                    return Read;
                case 0x2:
                    return Write;
                default:
                    return ReadAndWrite;
            }
        }

        /** True when this kind's flags include the given kind's flags. */
        public boolean supports(OperationType type) {
            return (flag & type.flag) != 0;
        }

        OperationType(int flag) {
            this.flag = flag;
        }
    }

    /** True when more than {@code duration} has elapsed between start and end. */
    private boolean durationPassed(Instant end, Instant start, Duration duration) {
        return end.minus(duration).isAfter(start);
    }

    private boolean unavailableLocationsExpirationTimePassed() {
        return durationPassed(Instant.now(), this.lastCacheUpdateTimestamp, this.unavailableLocationsExpirationTime);
    }

    /** Supplier counts as configured only if it is non-null AND yields a non-null value. */
    private static boolean isExcludedRegionsSupplierConfigured(Supplier<CosmosExcludedRegions> excludedRegionsSupplier) {
        return excludedRegionsSupplier != null && excludedRegionsSupplier.get() != null;
    }

    /**
     * Immutable-by-convention snapshot of the account's regions/endpoints; a new
     * instance is built on every cache update and published by reference swap.
     * Region-name keyed maps are case-insensitive; preferred locations are stored
     * lower-cased.
     */
    static class DatabaseAccountLocationsInfo {
        private UnmodifiableList<String> preferredLocations;
        // Region names in account order.
        private UnmodifiableList<String> availableWriteLocations;
        private UnmodifiableList<String> availableReadLocations;
        // Region name (case-insensitive) -> endpoint.
        private UnmodifiableMap<String, URI> availableWriteEndpointByLocation;
        private UnmodifiableMap<String, URI> availableReadEndpointByLocation;
        // Endpoint -> region name (lower-cased).
        private UnmodifiableMap<URI, String> regionNameByWriteEndpoint;
        private UnmodifiableMap<URI, String> regionNameByReadEndpoint;
        // Routing-ordered endpoints; seeded with the default endpoint until the
        // first account read completes.
        private UnmodifiableList<URI> writeEndpoints;
        private UnmodifiableList<URI> readEndpoints;

        public DatabaseAccountLocationsInfo(List<String> preferredLocations,
                                            URI defaultEndpoint) {
            this.preferredLocations =
                new UnmodifiableList<>(preferredLocations.stream().map(loc -> loc.toLowerCase(Locale.ROOT)).collect(Collectors.toList()));
            this.availableWriteEndpointByLocation
                = (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(new CaseInsensitiveMap<>());
            this.availableReadEndpointByLocation
                = (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(new CaseInsensitiveMap<>());
            this.regionNameByWriteEndpoint
                = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(new CaseInsensitiveMap<>());
            this.regionNameByReadEndpoint
                = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(new CaseInsensitiveMap<>());
            this.availableReadLocations = new UnmodifiableList<>(Collections.emptyList());
            this.availableWriteLocations = new UnmodifiableList<>(Collections.emptyList());
            this.readEndpoints = new UnmodifiableList<>(Collections.singletonList(defaultEndpoint));
            this.writeEndpoints = new UnmodifiableList<>(Collections.singletonList(defaultEndpoint));
        }

        /** Shallow copy; all contained collections are unmodifiable and shared. */
        public DatabaseAccountLocationsInfo(DatabaseAccountLocationsInfo other) {
            this.preferredLocations = other.preferredLocations;
            this.availableWriteLocations = other.availableWriteLocations;
            this.availableReadLocations = other.availableReadLocations;
            this.availableWriteEndpointByLocation = other.availableWriteEndpointByLocation;
            this.regionNameByWriteEndpoint = other.regionNameByWriteEndpoint;
            this.regionNameByReadEndpoint = other.regionNameByReadEndpoint;
            this.availableReadEndpointByLocation = other.availableReadEndpointByLocation;
            this.writeEndpoints = other.writeEndpoints;
            this.readEndpoints = other.readEndpoints;
        }
    }
}
/**
 * Caches the account's regional endpoints and decides, per request, which endpoint to route to.
 *
 * Routing order is driven by (1) the client's preferred regions and (2) per-endpoint
 * unavailability bookkeeping; endpoints marked unavailable are demoted to the end of the
 * candidate list until their unavailability record expires.
 *
 * Thread-safety: the locationInfo snapshot is replaced atomically under lockObject
 * (copy-on-write); unavailability records live in a ConcurrentHashMap.
 */
class LocationCache {
    private final static Logger logger = LoggerFactory.getLogger(LocationCache.class);
    private final boolean enableEndpointDiscovery;
    // Global (account-level) endpoint; also the fallback when no regional endpoint applies.
    private final URI defaultEndpoint;
    // Client opted in to multi-master writes (connection policy side).
    private final boolean useMultipleWriteLocations;
    private final Object lockObject;
    // How long an endpoint stays "unavailable" before its record is considered stale.
    private final Duration unavailableLocationsExpirationTime;
    private final ConcurrentHashMap<URI, LocationUnavailabilityInfo> locationUnavailabilityInfoByEndpoint;
    private final ConnectionPolicy connectionPolicy;
    // Copy-on-write snapshot; replaced wholesale inside updateLocationCache(...).
    private DatabaseAccountLocationsInfo locationInfo;
    private Instant lastCacheUpdateTimestamp;
    // Whether the ACCOUNT supports multiple write locations (learned from DatabaseAccount).
    private boolean enableMultipleWriteLocations;

    public LocationCache(ConnectionPolicy connectionPolicy, URI defaultEndpoint, Configs configs) {
        List<String> preferredLocations = new ArrayList<>(
            connectionPolicy.getPreferredRegions() != null
                ? connectionPolicy.getPreferredRegions()
                : Collections.emptyList());
        this.locationInfo = new DatabaseAccountLocationsInfo(preferredLocations, defaultEndpoint);
        this.defaultEndpoint = defaultEndpoint;
        this.enableEndpointDiscovery = connectionPolicy.isEndpointDiscoveryEnabled();
        this.useMultipleWriteLocations = connectionPolicy.isMultipleWriteRegionsEnabled();
        this.lockObject = new Object();
        this.locationUnavailabilityInfoByEndpoint = new ConcurrentHashMap<>();
        // MIN so the first expiration check always counts as "passed".
        this.lastCacheUpdateTimestamp = Instant.MIN;
        this.enableMultipleWriteLocations = false;
        this.unavailableLocationsExpirationTime = Duration.ofSeconds(configs.getUnavailableLocationsExpirationTimeInSeconds());
        this.connectionPolicy = connectionPolicy;
    }

    /**
     * Gets the list of read endpoints ordered by
     * 1. preferred location, 2. endpoint availability.
     * Lazily recomputes the ordering when stale unavailability records may exist.
     * @return ordered read endpoints
     */
    public UnmodifiableList<URI> getReadEndpoints() {
        if (this.locationUnavailabilityInfoByEndpoint.size() > 0 && unavailableLocationsExpirationTimePassed()) {
            this.updateLocationCache();
        }
        return this.locationInfo.readEndpoints;
    }

    /**
     * Gets the list of write endpoints ordered by
     * 1. preferred location, 2. endpoint availability.
     * @return ordered write endpoints
     */
    public UnmodifiableList<URI> getWriteEndpoints() {
        if (this.locationUnavailabilityInfoByEndpoint.size() > 0 && unavailableLocationsExpirationTimePassed()) {
            this.updateLocationCache();
        }
        return this.locationInfo.writeEndpoints;
    }

    /***
     * Get the list of available read endpoints.
     * The list will not be filtered by preferred region list.
     * This method is ONLY used for fault injection.
     * @return all known read endpoints
     */
    public List<URI> getAvailableReadEndpoints() {
        return this.locationInfo.availableReadEndpointByLocation.values().stream().collect(Collectors.toList());
    }

    /***
     * Get the list of available write endpoints.
     * The list will not be filtered by preferred region list.
     * This method is ONLY used for fault injection.
     * @return all known write endpoints
     */
    public List<URI> getAvailableWriteEndpoints() {
        return this.locationInfo.availableWriteEndpointByLocation.values().stream().collect(Collectors.toList());
    }

    // Regions used as the effective preference order when the client configured none.
    public List<String> getEffectivePreferredLocations() {
        return this.locationInfo.effectivePreferredLocations;
    }

    /** Marks the given endpoint unavailable for read. */
    public void markEndpointUnavailableForRead(URI endpoint) {
        this.markEndpointUnavailable(endpoint, OperationType.Read);
    }

    /** Marks the given endpoint unavailable for write. */
    public void markEndpointUnavailableForWrite(URI endpoint) {
        this.markEndpointUnavailable(endpoint, OperationType.Write);
    }

    /**
     * Invoked when {@link DatabaseAccount} is read; refreshes the cached topology.
     * @param databaseAccount READ DatabaseAccount
     */
    public void onDatabaseAccountRead(DatabaseAccount databaseAccount) {
        this.updateLocationCache(
            databaseAccount.getWritableLocations(),
            databaseAccount.getReadableLocations(),
            null,
            BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
    }

    // Re-derives endpoint ordering when the preference list changes at runtime.
    void onLocationPreferenceChanged(UnmodifiableList<String> preferredLocations) {
        this.updateLocationCache(null, null, preferredLocations, null);
    }

    /**
     * Resolves request to service endpoint.
     * 1. If this is a write request
     *    (a) If UseMultipleWriteLocations = true
     *        (i) For document writes, resolve to most preferred and available write endpoint.
     *            Once the endpoint is marked unavailable, it is moved to the end of available
     *            write endpoints; the request is retried on the next preferred available one.
     *        (ii) For all other resources, always resolve to the first/second (regardless of
     *             preferred locations) write endpoint in {@link DatabaseAccount}.
     *    (b) Else resolve to the first write endpoint in {@link DatabaseAccount} (second only
     *        during manual failover).
     * 2. Else resolve the request to the most preferred available read endpoint
     *    (automatic failover for reads).
     * @param request request for which the endpoint is to be resolved
     * @return resolved endpoint
     */
    public URI resolveServiceEndpoint(RxDocumentServiceRequest request) {
        Objects.requireNonNull(request.requestContext, "RxDocumentServiceRequest.requestContext is required and cannot be null.");
        // An explicitly pinned endpoint (e.g. from a retry policy) wins outright.
        if (request.requestContext.locationEndpointToRoute != null) {
            return request.requestContext.locationEndpointToRoute;
        }
        int locationIndex = Utils.getValueOrDefault(request.requestContext.locationIndexToRoute, 0);
        boolean usePreferredLocations = request.requestContext.usePreferredLocations != null
            ? request.requestContext.usePreferredLocations
            : true;
        if (!usePreferredLocations
            || (request.getOperationType().isWriteOperation() && !this.canUseMultipleWriteLocations(request))) {
            // Single-master write (or preference bypass): only the first/second account-level
            // write region is ever used, hence the index is folded modulo 2.
            DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo;
            if (this.enableEndpointDiscovery && currentLocationInfo.availableWriteLocations.size() > 0) {
                locationIndex = Math.min(locationIndex % 2, currentLocationInfo.availableWriteLocations.size() - 1);
                String writeLocation = currentLocationInfo.availableWriteLocations.get(locationIndex);
                return currentLocationInfo.availableWriteEndpointByLocation.get(writeLocation);
            } else {
                return this.defaultEndpoint;
            }
        } else {
            UnmodifiableList<URI> endpoints = request.getOperationType().isWriteOperation()
                ? this.getApplicableWriteEndpoints(request)
                : this.getApplicableReadEndpoints(request);
            // Wrap around so a retry index beyond the list cycles through candidates.
            return endpoints.get(locationIndex % endpoints.size());
        }
    }

    public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) {
        return this.getApplicableWriteEndpoints(request.requestContext.getExcludeRegions(), request.requestContext.getUnavailableRegionsForPartition());
    }

    /**
     * Write endpoints minus regions excluded on the request / client plus
     * regions currently unavailable for the partition.
     * Precedence: request-level exclusions override the client-level supplier.
     */
    public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegionsOnRequest, List<String> unavailableRegionsForPartition) {
        UnmodifiableList<URI> writeEndpoints = this.getWriteEndpoints();
        Supplier<CosmosExcludedRegions> excludedRegionsSupplier = this.connectionPolicy.getExcludedRegionsSupplier();
        List<String> effectiveExcludedRegions = isExcludedRegionsSupplierConfigured(excludedRegionsSupplier)
            ? new ArrayList<>(excludedRegionsSupplier.get().getExcludedRegions())
            : Collections.emptyList();
        // Fast path: nothing to filter out.
        if (!isExcludeRegionsConfigured(excludedRegionsOnRequest, effectiveExcludedRegions)
            && (unavailableRegionsForPartition == null || unavailableRegionsForPartition.isEmpty())) {
            return writeEndpoints;
        }
        if (excludedRegionsOnRequest != null && !excludedRegionsOnRequest.isEmpty()) {
            effectiveExcludedRegions = excludedRegionsOnRequest;
        }
        List<String> effectiveExcludedRegionsWithPartitionUnavailableRegions = new ArrayList<>(effectiveExcludedRegions);
        if (unavailableRegionsForPartition != null) {
            effectiveExcludedRegionsWithPartitionUnavailableRegions.addAll(unavailableRegionsForPartition);
        }
        return this.getApplicableEndpoints(
            writeEndpoints,
            this.locationInfo.regionNameByWriteEndpoint,
            this.defaultEndpoint,
            effectiveExcludedRegionsWithPartitionUnavailableRegions);
    }

    public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) {
        return this.getApplicableReadEndpoints(request.requestContext.getExcludeRegions(), request.requestContext.getUnavailableRegionsForPartition());
    }

    /**
     * Read counterpart of getApplicableWriteEndpoints; falls back to the first
     * write endpoint when every read endpoint is excluded.
     */
    public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegionsOnRequest, List<String> unavailableRegionsForPartition) {
        UnmodifiableList<URI> readEndpoints = this.getReadEndpoints();
        Supplier<CosmosExcludedRegions> excludedRegionsSupplier = this.connectionPolicy.getExcludedRegionsSupplier();
        List<String> effectiveExcludedRegions = isExcludedRegionsSupplierConfigured(excludedRegionsSupplier)
            ? new ArrayList<>(excludedRegionsSupplier.get().getExcludedRegions())
            : Collections.emptyList();
        if (!isExcludeRegionsConfigured(excludedRegionsOnRequest, effectiveExcludedRegions)
            && (unavailableRegionsForPartition == null || unavailableRegionsForPartition.isEmpty())) {
            return readEndpoints;
        }
        if (excludedRegionsOnRequest != null && !excludedRegionsOnRequest.isEmpty()) {
            effectiveExcludedRegions = excludedRegionsOnRequest;
        }
        List<String> effectiveExcludedRegionsWithPartitionUnavailableRegions = new ArrayList<>(effectiveExcludedRegions);
        if (unavailableRegionsForPartition != null) {
            effectiveExcludedRegionsWithPartitionUnavailableRegions.addAll(unavailableRegionsForPartition);
        }
        return this.getApplicableEndpoints(
            readEndpoints,
            this.locationInfo.regionNameByReadEndpoint,
            this.locationInfo.writeEndpoints.get(0),
            effectiveExcludedRegionsWithPartitionUnavailableRegions);
    }

    // Filters endpoints whose region is on the exclude list (case-insensitive);
    // guarantees a non-empty result by substituting fallbackEndpoint.
    private UnmodifiableList<URI> getApplicableEndpoints(
        UnmodifiableList<URI> endpoints,
        UnmodifiableMap<URI, String> regionNameByEndpoint,
        URI fallbackEndpoint,
        List<String> excludeRegionList) {
        List<URI> applicableEndpoints = new ArrayList<>();
        for (URI endpoint : endpoints) {
            Utils.ValueHolder<String> regionName = new Utils.ValueHolder<>();
            if (Utils.tryGetValue(regionNameByEndpoint, endpoint, regionName)) {
                if (!excludeRegionList.stream().anyMatch(regionName.v::equalsIgnoreCase)) {
                    applicableEndpoints.add(endpoint);
                }
            }
        }
        if (applicableEndpoints.isEmpty()) {
            applicableEndpoints.add(fallbackEndpoint);
        }
        return new UnmodifiableList<>(applicableEndpoints);
    }

    // True when exclusions were supplied either on the request or via the client supplier.
    private boolean isExcludeRegionsConfigured(List<String> excludedRegionsOnRequest, List<String> excludedRegionsOnClient) {
        boolean isExcludedRegionsConfiguredOnRequest = !(excludedRegionsOnRequest == null || excludedRegionsOnRequest.isEmpty());
        boolean isExcludedRegionsConfiguredOnClient = !(excludedRegionsOnClient == null || excludedRegionsOnClient.isEmpty());
        return isExcludedRegionsConfiguredOnRequest || isExcludedRegionsConfiguredOnClient;
    }

    /**
     * Looks up the endpoint for a region by name (fault-injection support only).
     * @throws IllegalArgumentException when the region is unknown to the cache
     */
    public URI resolveFaultInjectionEndpoint(String region, boolean writeOnly) {
        Utils.ValueHolder<URI> endpointValueHolder = new Utils.ValueHolder<>();
        if (writeOnly) {
            Utils.tryGetValue(this.locationInfo.availableWriteEndpointByLocation, region, endpointValueHolder);
        } else {
            Utils.tryGetValue(this.locationInfo.availableReadEndpointByLocation, region, endpointValueHolder);
        }
        if (endpointValueHolder.v != null) {
            return endpointValueHolder.v;
        }
        throw new IllegalArgumentException("Can not find service endpoint for region " + region);
    }

    public URI getDefaultEndpoint() {
        return this.defaultEndpoint;
    }

    // Maps an endpoint back to its region name; falls back to the first write region.
    // NOTE(review): the fallback indexes availableWriteLocations.get(0) without an emptiness
    // check — presumably callers only reach this after topology is populated; verify.
    public String getRegionName(URI locationEndpoint, com.azure.cosmos.implementation.OperationType operationType) {
        Utils.ValueHolder<String> regionName = new Utils.ValueHolder<>();
        if (operationType.isWriteOperation()) {
            if (Utils.tryGetValue(this.locationInfo.regionNameByWriteEndpoint, locationEndpoint, regionName)) {
                return regionName.v;
            }
        } else {
            if (Utils.tryGetValue(this.locationInfo.regionNameByReadEndpoint, locationEndpoint, regionName)) {
                return regionName.v;
            }
        }
        return this.locationInfo.availableWriteLocations.get(0).toLowerCase(Locale.ROOT);
    }

    private boolean areEqual(URI url1, URI url2) {
        return url1.equals(url2);
    }

    // Drops unavailability records older than unavailableLocationsExpirationTime.
    private void clearStaleEndpointUnavailabilityInfo() {
        if (!this.locationUnavailabilityInfoByEndpoint.isEmpty()) {
            // Snapshot the keys; the map may be mutated concurrently.
            List<URI> unavailableEndpoints = new ArrayList<>(this.locationUnavailabilityInfoByEndpoint.keySet());
            for (URI unavailableEndpoint : unavailableEndpoints) {
                Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>();
                Utils.ValueHolder<LocationUnavailabilityInfo> removedHolder = new Utils.ValueHolder<>();
                if (Utils.tryGetValue(this.locationUnavailabilityInfoByEndpoint, unavailableEndpoint, unavailabilityInfoHolder)
                    && durationPassed(Instant.now(), unavailabilityInfoHolder.v.lastUnavailabilityCheckTimeStamp, this.unavailableLocationsExpirationTime)
                    && Utils.tryRemove(this.locationUnavailabilityInfoByEndpoint, unavailableEndpoint, removedHolder)) {
                    logger.debug(
                        "Removed endpoint [{}] unavailable for operations [{}] from unavailableEndpoints",
                        unavailableEndpoint,
                        unavailabilityInfoHolder.v.unavailableOperations);
                }
            }
        }
    }

    // An endpoint counts as unavailable only while a fresh (non-expired) record exists
    // covering the requested operation type.
    private boolean isEndpointUnavailable(URI endpoint, OperationType expectedAvailableOperations) {
        Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>();
        if (expectedAvailableOperations == OperationType.None
            || !Utils.tryGetValue(this.locationUnavailabilityInfoByEndpoint, endpoint, unavailabilityInfoHolder)
            || !unavailabilityInfoHolder.v.unavailableOperations.supports(expectedAvailableOperations)) {
            return false;
        } else {
            if (durationPassed(Instant.now(), unavailabilityInfoHolder.v.lastUnavailabilityCheckTimeStamp, this.unavailableLocationsExpirationTime)) {
                // Record expired — treat as available again.
                return false;
            } else {
                logger.debug(
                    "Endpoint [{}] unavailable for operations [{}] present in unavailableEndpoints",
                    endpoint,
                    unavailabilityInfoHolder.v.unavailableOperations);
                return true;
            }
        }
    }

    // True if at least one endpoint in the list is currently usable for the operation.
    private boolean anyEndpointsAvailable(List<URI> endpoints, OperationType expectedAvailableOperations) {
        // NOTE(review): this holder is never used in this method.
        Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>();
        boolean anyEndpointsAvailable = false;
        for (URI endpoint : endpoints) {
            if (!isEndpointUnavailable(endpoint, expectedAvailableOperations)) {
                anyEndpointsAvailable = true;
                break;
            }
        }
        return anyEndpointsAvailable;
    }

    // Records (or extends) an unavailability entry for the endpoint, then re-derives
    // the endpoint ordering so the unavailable endpoint is demoted.
    private void markEndpointUnavailable(URI unavailableEndpoint, OperationType unavailableOperationType) {
        Instant currentTime = Instant.now();
        LocationUnavailabilityInfo updatedInfo = this.locationUnavailabilityInfoByEndpoint.compute(
            unavailableEndpoint,
            new BiFunction<URI, LocationUnavailabilityInfo, LocationUnavailabilityInfo>() {
                @Override
                public LocationUnavailabilityInfo apply(URI url, LocationUnavailabilityInfo info) {
                    if (info == null) {
                        return new LocationUnavailabilityInfo(currentTime, unavailableOperationType);
                    } else {
                        // Merge: refresh the timestamp and OR in the new operation flag.
                        info.lastUnavailabilityCheckTimeStamp = currentTime;
                        info.unavailableOperations = OperationType.combine(info.unavailableOperations, unavailableOperationType);
                        return info;
                    }
                }
            });
        this.updateLocationCache();
        logger.debug(
            "Endpoint [{}] unavailable for [{}] added/updated to unavailableEndpoints with timestamp [{}]",
            unavailableEndpoint,
            unavailableOperationType,
            updatedInfo.lastUnavailabilityCheckTimeStamp);
    }

    private void updateLocationCache() {
        updateLocationCache(null, null, null, null);
    }

    /**
     * Rebuilds the locationInfo snapshot under the lock and publishes it atomically.
     * Any argument may be null, meaning "keep the current value for that dimension".
     */
    private void updateLocationCache(
        Iterable<DatabaseAccountLocation> writeLocations,
        Iterable<DatabaseAccountLocation> readLocations,
        UnmodifiableList<String> preferenceList,
        Boolean enableMultipleWriteLocations) {
        synchronized (this.lockObject) {
            // Work on a copy; readers keep seeing the old snapshot until the final assignment.
            DatabaseAccountLocationsInfo nextLocationInfo = new DatabaseAccountLocationsInfo(this.locationInfo);
            logger.debug("updating location cache ..., current readLocations [{}], current writeLocations [{}]",
                nextLocationInfo.readEndpoints, nextLocationInfo.writeEndpoints);
            if (preferenceList != null) {
                nextLocationInfo.preferredLocations = preferenceList;
            }
            if (enableMultipleWriteLocations != null) {
                this.enableMultipleWriteLocations = enableMultipleWriteLocations;
            }
            this.clearStaleEndpointUnavailabilityInfo();
            if (readLocations != null) {
                Utils.ValueHolder<UnmodifiableList<String>> out = Utils.ValueHolder.initialize(nextLocationInfo.availableReadLocations);
                Utils.ValueHolder<UnmodifiableMap<URI, String>> outReadRegionMap = Utils.ValueHolder.initialize(nextLocationInfo.regionNameByReadEndpoint);
                nextLocationInfo.availableReadEndpointByLocation = this.getEndpointByLocation(readLocations, out, outReadRegionMap);
                nextLocationInfo.availableReadLocations = out.v;
                nextLocationInfo.regionNameByReadEndpoint = outReadRegionMap.v;
            }
            if (writeLocations != null) {
                Utils.ValueHolder<UnmodifiableList<String>> out = Utils.ValueHolder.initialize(nextLocationInfo.availableWriteLocations);
                Utils.ValueHolder<UnmodifiableMap<URI, String>> outWriteRegionMap = Utils.ValueHolder.initialize(nextLocationInfo.regionNameByWriteEndpoint);
                nextLocationInfo.availableWriteEndpointByLocation = this.getEndpointByLocation(writeLocations, out, outWriteRegionMap);
                nextLocationInfo.availableWriteLocations = out.v;
                nextLocationInfo.regionNameByWriteEndpoint = outWriteRegionMap.v;
            }
            // Derive ordered endpoint lists; reads fall back to the first write endpoint.
            nextLocationInfo.writeEndpoints = this.getPreferredAvailableEndpoints(nextLocationInfo.availableWriteEndpointByLocation, nextLocationInfo.availableWriteLocations, OperationType.Write, this.defaultEndpoint);
            nextLocationInfo.readEndpoints = this.getPreferredAvailableEndpoints(nextLocationInfo.availableReadEndpointByLocation, nextLocationInfo.availableReadLocations, OperationType.Read, nextLocationInfo.writeEndpoints.get(0));
            if (nextLocationInfo.preferredLocations == null || nextLocationInfo.preferredLocations.isEmpty()) {
                // No client preference: adopt the account-level read regions as the effective
                // preference, but only when the default endpoint is not itself a regional
                // endpoint (otherwise the default endpoint already implies a region).
                Utils.ValueHolder<String> regionForDefaultEndpoint = new Utils.ValueHolder<>();
                if (!Utils.tryGetValue(nextLocationInfo.regionNameByReadEndpoint, this.defaultEndpoint, regionForDefaultEndpoint)) {
                    nextLocationInfo.effectivePreferredLocations = nextLocationInfo.availableReadLocations;
                }
            }
            this.lastCacheUpdateTimestamp = Instant.now();
            logger.debug("updating location cache finished, new readLocations [{}], new writeLocations [{}]",
                nextLocationInfo.readEndpoints, nextLocationInfo.writeEndpoints);
            // Publish the new snapshot.
            this.locationInfo = nextLocationInfo;
        }
    }

    /**
     * Orders endpoints by preference and availability:
     * available preferred endpoints first, then unavailable ones (as retry fallbacks),
     * with fallbackEndpoint substituted when nothing is available.
     */
    private UnmodifiableList<URI> getPreferredAvailableEndpoints(UnmodifiableMap<String, URI> endpointsByLocation,
                                                                 UnmodifiableList<String> orderedLocations,
                                                                 OperationType expectedAvailableOperation,
                                                                 URI fallbackEndpoint) {
        List<URI> endpoints = new ArrayList<>();
        DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo;
        if (this.enableEndpointDiscovery) {
            if (this.canUseMultipleWriteLocations() || expectedAvailableOperation.supports(OperationType.Read)) {
                List<URI> unavailableEndpoints = new ArrayList<>();
                if (currentLocationInfo.preferredLocations != null && !currentLocationInfo.preferredLocations.isEmpty()) {
                    // Walk the client's preference order, splitting available vs unavailable.
                    for (String location : currentLocationInfo.preferredLocations) {
                        Utils.ValueHolder<URI> endpoint = new Utils.ValueHolder<>();
                        if (Utils.tryGetValue(endpointsByLocation, location, endpoint)) {
                            if (this.isEndpointUnavailable(endpoint.v, expectedAvailableOperation)) {
                                unavailableEndpoints.add(endpoint.v);
                            } else {
                                endpoints.add(endpoint.v);
                            }
                        }
                    }
                } else {
                    // No client preference: use the account's own region order.
                    for (String location : orderedLocations) {
                        Utils.ValueHolder<URI> endpoint = Utils.ValueHolder.initialize(null);
                        if (Utils.tryGetValue(endpointsByLocation, location, endpoint)) {
                            // A regional endpoint equal to the default endpoint means the
                            // ordering collapses to just the fallback — start over.
                            if (this.defaultEndpoint.equals(endpoint.v)) {
                                endpoints = new ArrayList<>();
                                break;
                            }
                            if (this.isEndpointUnavailable(endpoint.v, expectedAvailableOperation)) {
                                unavailableEndpoints.add(endpoint.v);
                            } else {
                                endpoints.add(endpoint.v);
                            }
                        }
                    }
                }
                if (endpoints.isEmpty()) {
                    endpoints.add(fallbackEndpoint);
                }
                // Unavailable endpoints go last so retries can still reach them.
                endpoints.addAll(unavailableEndpoints);
            } else {
                for (String location : orderedLocations) {
                    Utils.ValueHolder<URI> endpoint = Utils.ValueHolder.initialize(null);
                    if (!Strings.isNullOrEmpty(location) && Utils.tryGetValue(endpointsByLocation, location, endpoint)) {
                        endpoints.add(endpoint.v);
                    }
                }
            }
        }
        if (endpoints.isEmpty()) {
            endpoints.add(fallbackEndpoint);
        }
        return new UnmodifiableList<URI>(endpoints);
    }

    /**
     * Builds region-name -> endpoint and endpoint -> region-name maps from the
     * service-reported locations; skips entries with empty names or malformed endpoints.
     * Outputs the parsed region order and reverse map through the ValueHolder params.
     */
    private UnmodifiableMap<String, URI> getEndpointByLocation(Iterable<DatabaseAccountLocation> locations,
                                                               Utils.ValueHolder<UnmodifiableList<String>> orderedLocations,
                                                               Utils.ValueHolder<UnmodifiableMap<URI, String>> regionMap) {
        Map<String, URI> endpointsByLocation = new CaseInsensitiveMap<>();
        Map<URI, String> regionByEndpoint = new CaseInsensitiveMap<>();
        List<String> parsedLocations = new ArrayList<>();
        for (DatabaseAccountLocation location : locations) {
            if (!Strings.isNullOrEmpty(location.getName())) {
                try {
                    URI endpoint = new URI(location.getEndpoint().toLowerCase(Locale.ROOT));
                    endpointsByLocation.put(location.getName().toLowerCase(Locale.ROOT), endpoint);
                    regionByEndpoint.put(endpoint, location.getName().toLowerCase(Locale.ROOT));
                    parsedLocations.add(location.getName());
                } catch (Exception e) {
                    logger.warn("GetAvailableEndpointsByLocation() - skipping add for location = [{}] as it is location name is either empty or endpoint is malformed [{}]", location.getName(), location.getEndpoint());
                }
            }
        }
        orderedLocations.v = new UnmodifiableList<String>(parsedLocations);
        regionMap.v = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(regionByEndpoint);
        return (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(endpointsByLocation);
    }

    // Multi-master writes require BOTH the client opt-in and account-level support.
    public boolean canUseMultipleWriteLocations() {
        return this.useMultipleWriteLocations && this.enableMultipleWriteLocations;
    }

    // Multi-master routing applies only to document writes and sproc execution.
    public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) {
        return this.canUseMultipleWriteLocations()
            && (request.getResourceType() == ResourceType.Document
            || (request.getResourceType() == ResourceType.StoredProcedure
            && request.getOperationType() == com.azure.cosmos.implementation.OperationType.ExecuteJavaScript));
    }

    // Mutable record: when an endpoint was last marked unavailable and for which operations.
    private static class LocationUnavailabilityInfo {
        LocationUnavailabilityInfo(Instant instant, OperationType type) {
            this.lastUnavailabilityCheckTimeStamp = instant;
            this.unavailableOperations = type;
        }
        public Instant lastUnavailabilityCheckTimeStamp;
        public OperationType unavailableOperations;
    }

    // Bit-flag operation set: Read | Write; combined via bitwise OR.
    private enum OperationType {
        None(0x0),
        Read(0x1),
        Write(0x2),
        ReadAndWrite(0x3);

        private final int flag;

        public boolean hasReadFlag() {
            return (flag & Read.flag) != 0;
        }

        public boolean hasWriteFlag() {
            return (flag & Write.flag) != 0;
        }

        public static OperationType combine(OperationType t1, OperationType t2) {
            switch (t1.flag | t2.flag) {
                case 0x0:
                    return None;
                case 0x1:
                    return Read;
                case 0x2:
                    return Write;
                default:
                    return ReadAndWrite;
            }
        }

        // True when this set includes every flag of the given type.
        public boolean supports(OperationType type) {
            return (flag & type.flag) != 0;
        }

        OperationType(int flag) {
            this.flag = flag;
        }
    }

    // True when (end - duration) is after start, i.e. more than `duration` elapsed.
    private boolean durationPassed(Instant end, Instant start, Duration duration) {
        return end.minus(duration).isAfter(start);
    }

    private boolean unavailableLocationsExpirationTimePassed() {
        return durationPassed(Instant.now(), this.lastCacheUpdateTimestamp, this.unavailableLocationsExpirationTime);
    }

    private static boolean isExcludedRegionsSupplierConfigured(Supplier<CosmosExcludedRegions> excludedRegionsSupplier) {
        return excludedRegionsSupplier != null && excludedRegionsSupplier.get() != null;
    }

    /**
     * Snapshot of the account's location topology (see class comment on LocationCache).
     * Replaced wholesale via copy-on-write; all contained collections are unmodifiable.
     */
    static class DatabaseAccountLocationsInfo {
        // Client-configured preferred regions, normalized to lower case at construction.
        private UnmodifiableList<String> preferredLocations;
        // Regions used as the effective preference when the client configured none.
        private UnmodifiableList<String> effectivePreferredLocations;
        private UnmodifiableList<String> availableWriteLocations;
        private UnmodifiableList<String> availableReadLocations;
        // region name -> endpoint (case-insensitive) and the reverse maps.
        private UnmodifiableMap<String, URI> availableWriteEndpointByLocation;
        private UnmodifiableMap<String, URI> availableReadEndpointByLocation;
        private UnmodifiableMap<URI, String> regionNameByWriteEndpoint;
        private UnmodifiableMap<URI, String> regionNameByReadEndpoint;
        // Effective routing orders; seeded with the default endpoint until first refresh.
        private UnmodifiableList<URI> writeEndpoints;
        private UnmodifiableList<URI> readEndpoints;

        public DatabaseAccountLocationsInfo(List<String> preferredLocations, URI defaultEndpoint) {
            this.preferredLocations = new UnmodifiableList<>(preferredLocations.stream().map(loc -> loc.toLowerCase(Locale.ROOT)).collect(Collectors.toList()));
            this.effectivePreferredLocations = new UnmodifiableList<>(Collections.emptyList());
            this.availableWriteEndpointByLocation = (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(new CaseInsensitiveMap<>());
            this.availableReadEndpointByLocation = (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(new CaseInsensitiveMap<>());
            this.regionNameByWriteEndpoint = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(new CaseInsensitiveMap<>());
            this.regionNameByReadEndpoint = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(new CaseInsensitiveMap<>());
            this.availableReadLocations = new UnmodifiableList<>(Collections.emptyList());
            this.availableWriteLocations = new UnmodifiableList<>(Collections.emptyList());
            this.readEndpoints = new UnmodifiableList<>(Collections.singletonList(defaultEndpoint));
            this.writeEndpoints = new UnmodifiableList<>(Collections.singletonList(defaultEndpoint));
        }

        // Shallow copy: sharing the unmodifiable collections is safe.
        public DatabaseAccountLocationsInfo(DatabaseAccountLocationsInfo other) {
            this.preferredLocations = other.preferredLocations;
            this.effectivePreferredLocations = other.effectivePreferredLocations;
            this.availableWriteLocations = other.availableWriteLocations;
            this.availableReadLocations = other.availableReadLocations;
            this.availableWriteEndpointByLocation = other.availableWriteEndpointByLocation;
            this.regionNameByWriteEndpoint = other.regionNameByWriteEndpoint;
            this.regionNameByReadEndpoint = other.regionNameByReadEndpoint;
            this.availableReadEndpointByLocation = other.availableReadEndpointByLocation;
            this.writeEndpoints = other.writeEndpoints;
            this.readEndpoints = other.readEndpoints;
        }
    }
}
Yeah - the `DatabaseAccount` / `LocationCache` refresh part is greatly simplified now.
/**
 * Decides whether the cached endpoint topology should be refreshed from the service.
 *
 * When the client configured no preferred regions, this variant derives "effective"
 * most-preferred read/write locations by reverse-mapping the first read/write endpoint
 * to its region name, then compares the current routing order against those.
 *
 * @param canRefreshInBackground out-param: true when a refresh may run in the background
 *                               (i.e. at least one endpoint is still usable); false forces
 *                               a blocking refresh.
 * @return true when the routing tables are (potentially) stale and should be refreshed
 */
public boolean shouldRefreshEndpoints(Utils.ValueHolder<Boolean> canRefreshInBackground) {
    canRefreshInBackground.v = true;
    DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo;
    String mostPreferredLocation = Utils.firstOrDefault(currentLocationInfo.preferredLocations);
    String mostPreferredEffectiveReadLocation = StringUtils.EMPTY;
    String mostPreferredEffectiveWriteLocation = StringUtils.EMPTY;
    if (StringUtils.isEmpty(mostPreferredLocation)) {
        // No client preference: infer the effective preference from the current first
        // read/write endpoint's region (may stay empty if the endpoint is unmapped).
        URI firstAccountLevelReadEndpoint = Utils.firstOrDefault(currentLocationInfo.readEndpoints);
        if (currentLocationInfo.regionNameByReadEndpoint != null) {
            mostPreferredEffectiveReadLocation = currentLocationInfo.regionNameByReadEndpoint.getOrDefault(firstAccountLevelReadEndpoint, StringUtils.EMPTY);
        }
        URI firstAccountLevelWriteEndpoint = Utils.firstOrDefault(currentLocationInfo.writeEndpoints);
        if (currentLocationInfo.regionNameByWriteEndpoint != null) {
            mostPreferredEffectiveWriteLocation = currentLocationInfo.regionNameByWriteEndpoint.getOrDefault(firstAccountLevelWriteEndpoint, StringUtils.EMPTY);
        }
    }
    // Explicit preference wins; otherwise fall back to the derived effective locations.
    String mostPreferredReadLocationToUse = !Strings.isNullOrEmpty(mostPreferredLocation) ? mostPreferredLocation : mostPreferredEffectiveReadLocation;
    String mostPreferredWriteLocationToUse = !Strings.isNullOrEmpty(mostPreferredLocation) ? mostPreferredLocation : mostPreferredEffectiveWriteLocation;
    if (this.enableEndpointDiscovery) {
        // Client wants multi-master but the account hasn't confirmed support yet.
        boolean shouldRefresh = this.useMultipleWriteLocations && !this.enableMultipleWriteLocations;
        List<URI> readLocationEndpoints = currentLocationInfo.readEndpoints;
        if (this.isEndpointUnavailable(readLocationEndpoints.get(0), OperationType.Read)) {
            // First read endpoint is down; background refresh only if an alternative exists.
            canRefreshInBackground.v = anyEndpointsAvailable(readLocationEndpoints, OperationType.Read);
            logger.debug("shouldRefreshEndpoints = true, since the first read endpoint " + "[{}] is not available for read. canRefreshInBackground = [{}]",
                readLocationEndpoints.get(0),
                canRefreshInBackground.v);
            return true;
        }
        if (!Strings.isNullOrEmpty(mostPreferredReadLocationToUse)) {
            Utils.ValueHolder<URI> mostPreferredReadEndpointHolder = new Utils.ValueHolder<>();
            Utils.ValueHolder<URI> firstAccountLevelReadEndpointHolder = new Utils.ValueHolder<>();
            logger.debug("getReadEndpoints [{}]", readLocationEndpoints);
            if (Utils.tryGetValue(currentLocationInfo.availableReadEndpointByLocation, mostPreferredReadLocationToUse, mostPreferredReadEndpointHolder)) {
                logger.debug("most preferred is [{}], most preferred available is [{}]",
                    mostPreferredReadLocationToUse,
                    mostPreferredReadEndpointHolder.v);
                if (!areEqual(mostPreferredReadEndpointHolder.v, readLocationEndpoints.get(0))) {
                    // The most preferred region is not the one currently routed first.
                    logger.debug("shouldRefreshEndpoints = true, most preferred location [{}]" + " is not available for read.", mostPreferredReadLocationToUse);
                    return true;
                } else if (currentLocationInfo.preferredLocations == null || currentLocationInfo.preferredLocations.isEmpty()) {
                    // No client preference: additionally verify against the first
                    // account-level read region.
                    List<String> accountLevelReadRegions = currentLocationInfo.availableReadLocations;
                    String firstAccountLevelReadRegion = Utils.firstOrDefault(accountLevelReadRegions);
                    if (Utils.tryGetValue(currentLocationInfo.availableReadEndpointByLocation, firstAccountLevelReadRegion, firstAccountLevelReadEndpointHolder)) {
                        if (!areEqual(mostPreferredReadEndpointHolder.v, firstAccountLevelReadEndpointHolder.v)) {
                            logger.debug("shouldRefreshEndpoints = true, preferredRegions on client is empty and first account-level read region [{}]" + " is not available for read.", firstAccountLevelReadRegion);
                            return true;
                        }
                    }
                }
                logger.debug("most preferred is [{}], and most preferred available [{}] are the same",
                    mostPreferredLocation,
                    mostPreferredReadEndpointHolder.v);
            } else {
                logger.debug("shouldRefreshEndpoints = true, most preferred location [{}] " + "is not in available read locations.", mostPreferredLocation);
                return true;
            }
        }
        Utils.ValueHolder<URI> mostPreferredWriteEndpointHolder = new Utils.ValueHolder<>();
        List<URI> writeLocationEndpoints = currentLocationInfo.writeEndpoints;
        logger.debug("getWriteEndpoints [{}]", writeLocationEndpoints);
        if (!this.canUseMultipleWriteLocations()) {
            // Single-master: only the health of the first write endpoint matters.
            if (this.isEndpointUnavailable(writeLocationEndpoints.get(0), OperationType.Write)) {
                canRefreshInBackground.v = anyEndpointsAvailable(writeLocationEndpoints, OperationType.Write);
                logger.debug("shouldRefreshEndpoints = true, most preferred location " + "[{}] endpoint [{}] is not available for write. canRefreshInBackground = [{}]",
                    mostPreferredLocation,
                    writeLocationEndpoints.get(0),
                    canRefreshInBackground.v);
                return true;
            } else {
                logger.debug("shouldRefreshEndpoints: false, [{}] is available for Write", writeLocationEndpoints.get(0));
                return shouldRefresh;
            }
        } else if (!Strings.isNullOrEmpty(mostPreferredWriteLocationToUse)) {
            // Multi-master: verify the first write endpoint matches the preferred region.
            if (Utils.tryGetValue(currentLocationInfo.availableWriteEndpointByLocation, mostPreferredWriteLocationToUse, mostPreferredWriteEndpointHolder)) {
                shouldRefresh = !areEqual(mostPreferredWriteEndpointHolder.v, writeLocationEndpoints.get(0));
                if (currentLocationInfo.preferredLocations == null || currentLocationInfo.preferredLocations.isEmpty()) {
                    // No client preference: also compare with the first account-level
                    // write region's endpoint.
                    List<String> accountLevelWriteRegions = currentLocationInfo.availableWriteLocations;
                    String firstAccountLevelWriteRegion = Utils.firstOrDefault(accountLevelWriteRegions);
                    Utils.ValueHolder<URI> firstAccountLevelWriteEndpointHolder = new Utils.ValueHolder<>();
                    if (Utils.tryGetValue(currentLocationInfo.availableWriteEndpointByLocation, firstAccountLevelWriteRegion, firstAccountLevelWriteEndpointHolder)) {
                        if (!areEqual(mostPreferredWriteEndpointHolder.v, firstAccountLevelWriteEndpointHolder.v)) {
                            logger.debug("shouldRefreshEndpoints = true, preferredRegions on client is empty and first account-level write region [{}]" + " is not available for write.", firstAccountLevelWriteRegion);
                            shouldRefresh = true;
                        }
                    }
                }
                if (shouldRefresh) {
                    logger.debug("shouldRefreshEndpoints: true, write endpoint [{}] is not the same as most preferred [{}]",
                        writeLocationEndpoints.get(0),
                        mostPreferredWriteEndpointHolder.v);
                } else {
                    logger.debug("shouldRefreshEndpoints: false, write endpoint [{}] is the same as most preferred [{}]",
                        writeLocationEndpoints.get(0),
                        mostPreferredWriteEndpointHolder.v);
                }
                return shouldRefresh;
            } else {
                logger.debug("shouldRefreshEndpoints = true, most preferred location [{}] is not in available write locations", mostPreferredLocation);
                return true;
            }
        } else {
            logger.debug("shouldRefreshEndpoints: false, mostPreferredLocation [{}] is empty", mostPreferredLocation);
            return shouldRefresh;
        }
    } else {
        logger.debug("shouldRefreshEndpoints: false, endpoint discovery not enabled");
        return false;
    }
}
// NOTE(review): orphaned fragment — this duplicates the local-variable declaration from the
// shouldRefreshEndpoints(...) overload earlier in this chunk (it looks like a quoted diff
// artifact); it is not attached to any method here. Confirm and remove if unintentional.
String mostPreferredEffectiveReadLocation = StringUtils.EMPTY;
/**
 * Decides whether the cached read/write endpoint lists should be refreshed from the
 * service (e.g. after an endpoint was marked unavailable or the most preferred
 * location is no longer first in the cached lists).
 *
 * Returns {@code true} as soon as any of these holds:
 *  - the first cached read endpoint is unavailable for reads;
 *  - the most preferred location's read endpoint is not the first cached read
 *    endpoint, or is missing from the available read endpoints;
 *  - (single-write accounts) the first cached write endpoint is unavailable for writes;
 *  - (multi-write accounts) the most preferred location's write endpoint differs from
 *    the first cached write endpoint, or is missing from the available write endpoints.
 * With endpoint discovery disabled it always returns {@code false}.
 *
 * @param canRefreshInBackground out-parameter; initialized to {@code true}, and set to
 *        {@code false} only when the first endpoint of the relevant list is unavailable
 *        AND no other endpoint in that list is available — i.e. the refresh cannot be
 *        deferred to the background.
 * @return {@code true} if the caller should trigger an endpoint refresh.
 */
public boolean shouldRefreshEndpoints(Utils.ValueHolder<Boolean> canRefreshInBackground) {
    canRefreshInBackground.v = true;
    DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo;
    String mostPreferredLocation = Utils.firstOrDefault(currentLocationInfo.preferredLocations);
    // Fall back to the effective (account-derived) preferred locations when the client
    // configured no preferred regions.
    if (StringUtils.isEmpty(mostPreferredLocation)) {
        mostPreferredLocation = Utils.firstOrDefault(currentLocationInfo.effectivePreferredLocations);
    }

    if (this.enableEndpointDiscovery) {
        // Default verdict when no stronger condition fires below: refresh once if the
        // client wants multi-write but the account has not (yet) reported support.
        boolean shouldRefresh = this.useMultipleWriteLocations && !this.enableMultipleWriteLocations;

        List<URI> readLocationEndpoints = currentLocationInfo.readEndpoints;

        // First read endpoint unavailable -> must refresh; background-only if some
        // other read endpoint is still usable.
        if (this.isEndpointUnavailable(readLocationEndpoints.get(0), OperationType.Read)) {
            canRefreshInBackground.v = anyEndpointsAvailable(readLocationEndpoints,OperationType.Read);
            logger.debug("shouldRefreshEndpoints = true, since the first read endpoint " +
                    "[{}] is not available for read. canRefreshInBackground = [{}]",
                readLocationEndpoints.get(0),
                canRefreshInBackground.v);

            return true;
        }

        // Check that the most preferred location is actually first in the cached reads.
        if (!Strings.isNullOrEmpty(mostPreferredLocation)) {
            Utils.ValueHolder<URI> mostPreferredReadEndpointHolder = new Utils.ValueHolder<>();
            logger.debug("getReadEndpoints [{}]", readLocationEndpoints);

            if (Utils.tryGetValue(currentLocationInfo.availableReadEndpointByLocation, mostPreferredLocation, mostPreferredReadEndpointHolder)) {
                logger.debug("most preferred is [{}], most preferred available is [{}]",
                    mostPreferredLocation, mostPreferredReadEndpointHolder.v);

                if (!areEqual(mostPreferredReadEndpointHolder.v, readLocationEndpoints.get(0))) {
                    logger.debug("shouldRefreshEndpoints = true, most preferred location [{}]" +
                        " is not available for read.", mostPreferredLocation);
                    return true;
                }

                logger.debug("most preferred is [{}], and most preferred available [{}] are the same",
                    mostPreferredLocation, mostPreferredReadEndpointHolder.v);
            } else {
                logger.debug("shouldRefreshEndpoints = true, most preferred location [{}] " +
                    "is not in available read locations.", mostPreferredLocation);
                return true;
            }
        }

        Utils.ValueHolder<URI> mostPreferredWriteEndpointHolder = new Utils.ValueHolder<>();
        List<URI> writeLocationEndpoints = currentLocationInfo.writeEndpoints;
        logger.debug("getWriteEndpoints [{}]", writeLocationEndpoints);

        if (!this.canUseMultipleWriteLocations()) {
            // Single-write account: only the first write endpoint matters.
            if (this.isEndpointUnavailable(writeLocationEndpoints.get(0), OperationType.Write)) {
                canRefreshInBackground.v = anyEndpointsAvailable(writeLocationEndpoints,OperationType.Write);
                logger.debug("shouldRefreshEndpoints = true, most preferred location " +
                        "[{}] endpoint [{}] is not available for write. canRefreshInBackground = [{}]",
                    mostPreferredLocation,
                    writeLocationEndpoints.get(0),
                    canRefreshInBackground.v);

                return true;
            } else {
                logger.debug("shouldRefreshEndpoints: false, [{}] is available for Write", writeLocationEndpoints.get(0));
                return shouldRefresh;
            }
        } else if (!Strings.isNullOrEmpty(mostPreferredLocation)) {
            // Multi-write account: refresh if the most preferred write endpoint is not
            // first in the cached write list.
            if (Utils.tryGetValue(currentLocationInfo.availableWriteEndpointByLocation, mostPreferredLocation, mostPreferredWriteEndpointHolder)) {
                shouldRefresh = ! areEqual(mostPreferredWriteEndpointHolder.v,writeLocationEndpoints.get(0));

                if (shouldRefresh) {
                    logger.debug("shouldRefreshEndpoints: true, write endpoint [{}] is not the same as most preferred [{}]",
                        writeLocationEndpoints.get(0), mostPreferredWriteEndpointHolder.v);
                } else {
                    logger.debug("shouldRefreshEndpoints: false, write endpoint [{}] is the same as most preferred [{}]",
                        writeLocationEndpoints.get(0), mostPreferredWriteEndpointHolder.v);
                }

                return shouldRefresh;
            } else {
                logger.debug("shouldRefreshEndpoints = true, most preferred location [{}] is not in available write locations",
                    mostPreferredLocation);
                return true;
            }
        } else {
            logger.debug("shouldRefreshEndpoints: false, mostPreferredLocation [{}] is empty", mostPreferredLocation);
            return shouldRefresh;
        }
    } else {
        logger.debug("shouldRefreshEndpoints: false, endpoint discovery not enabled");
        return false;
    }
}
class LocationCache { private final static Logger logger = LoggerFactory.getLogger(LocationCache.class); private final boolean enableEndpointDiscovery; private final URI defaultEndpoint; private final boolean useMultipleWriteLocations; private final Object lockObject; private final Duration unavailableLocationsExpirationTime; private final ConcurrentHashMap<URI, LocationUnavailabilityInfo> locationUnavailabilityInfoByEndpoint; private final ConnectionPolicy connectionPolicy; private DatabaseAccountLocationsInfo locationInfo; private Instant lastCacheUpdateTimestamp; private boolean enableMultipleWriteLocations; public LocationCache( ConnectionPolicy connectionPolicy, URI defaultEndpoint, Configs configs) { List<String> preferredLocations = new ArrayList<>(connectionPolicy.getPreferredRegions() != null ? connectionPolicy.getPreferredRegions() : Collections.emptyList() ); this.locationInfo = new DatabaseAccountLocationsInfo(preferredLocations, defaultEndpoint); this.defaultEndpoint = defaultEndpoint; this.enableEndpointDiscovery = connectionPolicy.isEndpointDiscoveryEnabled(); this.useMultipleWriteLocations = connectionPolicy.isMultipleWriteRegionsEnabled(); this.lockObject = new Object(); this.locationUnavailabilityInfoByEndpoint = new ConcurrentHashMap<>(); this.lastCacheUpdateTimestamp = Instant.MIN; this.enableMultipleWriteLocations = false; this.unavailableLocationsExpirationTime = Duration.ofSeconds(configs.getUnavailableLocationsExpirationTimeInSeconds()); this.connectionPolicy = connectionPolicy; } /** * Gets list of read endpoints ordered by * * 1. Preferred location * 2. Endpoint availability * @return */ public UnmodifiableList<URI> getReadEndpoints() { if (this.locationUnavailabilityInfoByEndpoint.size() > 0 && unavailableLocationsExpirationTimePassed()) { this.updateLocationCache(); } return this.locationInfo.readEndpoints; } /** * Gets list of write endpoints ordered by * 1. Preferred location * 2. 
Endpoint availability * @return */ public UnmodifiableList<URI> getWriteEndpoints() { if (this.locationUnavailabilityInfoByEndpoint.size() > 0 && unavailableLocationsExpirationTimePassed()) { this.updateLocationCache(); } return this.locationInfo.writeEndpoints; } /*** * Get the list of available read endpoints. * The list will not be filtered by preferred region list. * * This method is ONLY used for fault injection. * @return */ public List<URI> getAvailableReadEndpoints() { return this.locationInfo.availableReadEndpointByLocation.values().stream().collect(Collectors.toList()); } /*** * Get the list of available write endpoints. * The list will not be filtered by preferred region list. * * This method is ONLY used for fault injection. * @return */ public List<URI> getAvailableWriteEndpoints() { return this.locationInfo.availableWriteEndpointByLocation.values().stream().collect(Collectors.toList()); } /** * Marks the current location unavailable for read */ public void markEndpointUnavailableForRead(URI endpoint) { this.markEndpointUnavailable(endpoint, OperationType.Read); } /** * Marks the current location unavailable for write */ public void markEndpointUnavailableForWrite(URI endpoint) { this.markEndpointUnavailable(endpoint, OperationType.Write); } /** * Invoked when {@link DatabaseAccount} is read * @param databaseAccount READ DatabaseAccount */ public void onDatabaseAccountRead(DatabaseAccount databaseAccount) { this.updateLocationCache( databaseAccount.getWritableLocations(), databaseAccount.getReadableLocations(), null, BridgeInternal.isEnableMultipleWriteLocations(databaseAccount)); } void onLocationPreferenceChanged(UnmodifiableList<String> preferredLocations) { this.updateLocationCache( null, null , preferredLocations, null); } /** * Resolves request to service endpoint. * 1. If this is a write request * (a) If UseMultipleWriteLocations = true * (i) For document writes, resolve to most preferred and available write endpoint. 
* Once the endpoint is marked unavailable, it is moved to the end of available write endpoint. Current request will * be retried on next preferred available write endpoint. * (ii) For all other resources, always resolve to first/second (regardless of preferred locations) * write getEndpoint in {@link DatabaseAccount * Endpoint of first write location in {@link DatabaseAccount * write operation on all resource types (except during that region's failover). * Only during manual failover, client would retry write on second write location in {@link DatabaseAccount * (b) Else resolve the request to first write getEndpoint in {@link DatabaseAccount * second write getEndpoint in {@link DatabaseAccount * 2. Else resolve the request to most preferred available read getEndpoint (getAutomatic failover for read requests) * @param request Request for which getEndpoint is to be resolved * @return Resolved getEndpoint */ public URI resolveServiceEndpoint(RxDocumentServiceRequest request) { Objects.requireNonNull(request.requestContext, "RxDocumentServiceRequest.requestContext is required and cannot be null."); if(request.requestContext.locationEndpointToRoute != null) { return request.requestContext.locationEndpointToRoute; } int locationIndex = Utils.getValueOrDefault(request.requestContext.locationIndexToRoute, 0); boolean usePreferredLocations = request.requestContext.usePreferredLocations != null ? 
request.requestContext.usePreferredLocations : true; if(!usePreferredLocations || (request.getOperationType().isWriteOperation() && !this.canUseMultipleWriteLocations(request))) { DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo; if(this.enableEndpointDiscovery && currentLocationInfo.availableWriteLocations.size() > 0) { locationIndex = Math.min(locationIndex%2, currentLocationInfo.availableWriteLocations.size()-1); String writeLocation = currentLocationInfo.availableWriteLocations.get(locationIndex); return currentLocationInfo.availableWriteEndpointByLocation.get(writeLocation); } else { return this.defaultEndpoint; } } else { UnmodifiableList<URI> endpoints = request.getOperationType().isWriteOperation()? this.getApplicableWriteEndpoints(request) : this.getApplicableReadEndpoints(request); return endpoints.get(locationIndex % endpoints.size()); } } public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) { return this.getApplicableWriteEndpoints(request.requestContext.getExcludeRegions(), request.requestContext.getUnavailableRegionsForPartition()); } public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegionsOnRequest, List<String> unavailableRegionsForPartition) { UnmodifiableList<URI> writeEndpoints = this.getWriteEndpoints(); Supplier<CosmosExcludedRegions> excludedRegionsSupplier = this.connectionPolicy.getExcludedRegionsSupplier(); List<String> effectiveExcludedRegions = isExcludedRegionsSupplierConfigured(excludedRegionsSupplier) ? 
new ArrayList<>(excludedRegionsSupplier.get().getExcludedRegions()) : Collections.emptyList(); if (!isExcludeRegionsConfigured(excludedRegionsOnRequest, effectiveExcludedRegions) && (unavailableRegionsForPartition == null || unavailableRegionsForPartition.isEmpty())) { return writeEndpoints; } if (excludedRegionsOnRequest != null && !excludedRegionsOnRequest.isEmpty()) { effectiveExcludedRegions = excludedRegionsOnRequest; } List<String> effectiveExcludedRegionsWithPartitionUnavailableRegions = new ArrayList<>(effectiveExcludedRegions); if (unavailableRegionsForPartition != null) { effectiveExcludedRegionsWithPartitionUnavailableRegions.addAll(unavailableRegionsForPartition); } return this.getApplicableEndpoints( writeEndpoints, this.locationInfo.regionNameByWriteEndpoint, this.defaultEndpoint, effectiveExcludedRegionsWithPartitionUnavailableRegions); } public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) { return this.getApplicableReadEndpoints(request.requestContext.getExcludeRegions(), request.requestContext.getUnavailableRegionsForPartition()); } public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegionsOnRequest, List<String> unavailableRegionsForPartition) { UnmodifiableList<URI> readEndpoints = this.getReadEndpoints(); Supplier<CosmosExcludedRegions> excludedRegionsSupplier = this.connectionPolicy.getExcludedRegionsSupplier(); List<String> effectiveExcludedRegions = isExcludedRegionsSupplierConfigured(excludedRegionsSupplier) ? 
new ArrayList<>(excludedRegionsSupplier.get().getExcludedRegions()) : Collections.emptyList(); if (!isExcludeRegionsConfigured(excludedRegionsOnRequest, effectiveExcludedRegions) && (unavailableRegionsForPartition == null || unavailableRegionsForPartition.isEmpty())) { return readEndpoints; } if (excludedRegionsOnRequest != null && !excludedRegionsOnRequest.isEmpty()) { effectiveExcludedRegions = excludedRegionsOnRequest; } List<String> effectiveExcludedRegionsWithPartitionUnavailableRegions = new ArrayList<>(effectiveExcludedRegions); if (unavailableRegionsForPartition != null) { effectiveExcludedRegionsWithPartitionUnavailableRegions.addAll(unavailableRegionsForPartition); } return this.getApplicableEndpoints( readEndpoints, this.locationInfo.regionNameByReadEndpoint, this.locationInfo.writeEndpoints.get(0), effectiveExcludedRegionsWithPartitionUnavailableRegions); } private UnmodifiableList<URI> getApplicableEndpoints( UnmodifiableList<URI> endpoints, UnmodifiableMap<URI, String> regionNameByEndpoint, URI fallbackEndpoint, List<String> excludeRegionList) { List<URI> applicableEndpoints = new ArrayList<>(); for (URI endpoint : endpoints) { Utils.ValueHolder<String> regionName = new Utils.ValueHolder<>(); if (Utils.tryGetValue(regionNameByEndpoint, endpoint, regionName)) { if (!excludeRegionList.stream().anyMatch(regionName.v::equalsIgnoreCase)) { applicableEndpoints.add(endpoint); } } } if (applicableEndpoints.isEmpty()) { applicableEndpoints.add(fallbackEndpoint); } return new UnmodifiableList<>(applicableEndpoints); } private boolean isExcludeRegionsConfigured(List<String> excludedRegionsOnRequest, List<String> excludedRegionsOnClient) { boolean isExcludedRegionsConfiguredOnRequest = !(excludedRegionsOnRequest == null || excludedRegionsOnRequest.isEmpty()); boolean isExcludedRegionsConfiguredOnClient = !(excludedRegionsOnClient == null || excludedRegionsOnClient.isEmpty()); return isExcludedRegionsConfiguredOnRequest || isExcludedRegionsConfiguredOnClient; } 
public URI resolveFaultInjectionEndpoint(String region, boolean writeOnly) { Utils.ValueHolder<URI> endpointValueHolder = new Utils.ValueHolder<>(); if (writeOnly) { Utils.tryGetValue(this.locationInfo.availableWriteEndpointByLocation, region, endpointValueHolder); } else { Utils.tryGetValue(this.locationInfo.availableReadEndpointByLocation, region, endpointValueHolder); } if (endpointValueHolder.v != null) { return endpointValueHolder.v; } throw new IllegalArgumentException("Can not find service endpoint for region " + region); } public URI getDefaultEndpoint() { return this.defaultEndpoint; } public String getRegionName(URI locationEndpoint, com.azure.cosmos.implementation.OperationType operationType) { Utils.ValueHolder<String> regionName = new Utils.ValueHolder<>(); if (operationType.isWriteOperation()) { if (Utils.tryGetValue(this.locationInfo.regionNameByWriteEndpoint, locationEndpoint, regionName)) { return regionName.v; } } else { if (Utils.tryGetValue(this.locationInfo.regionNameByReadEndpoint, locationEndpoint, regionName)) { return regionName.v; } } return this.locationInfo.availableWriteLocations.get(0).toLowerCase(Locale.ROOT); } private boolean areEqual(URI url1, URI url2) { return url1.equals(url2); } private void clearStaleEndpointUnavailabilityInfo() { if (!this.locationUnavailabilityInfoByEndpoint.isEmpty()) { List<URI> unavailableEndpoints = new ArrayList<>(this.locationUnavailabilityInfoByEndpoint.keySet()); for (URI unavailableEndpoint: unavailableEndpoints) { Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>(); Utils.ValueHolder<LocationUnavailabilityInfo> removedHolder = new Utils.ValueHolder<>(); if (Utils.tryGetValue(this.locationUnavailabilityInfoByEndpoint, unavailableEndpoint, unavailabilityInfoHolder) && durationPassed(Instant.now(), unavailabilityInfoHolder.v.lastUnavailabilityCheckTimeStamp, this.unavailableLocationsExpirationTime) && 
Utils.tryRemove(this.locationUnavailabilityInfoByEndpoint, unavailableEndpoint, removedHolder)) { logger.debug( "Removed endpoint [{}] unavailable for operations [{}] from unavailableEndpoints", unavailableEndpoint, unavailabilityInfoHolder.v.unavailableOperations); } } } } private boolean isEndpointUnavailable(URI endpoint, OperationType expectedAvailableOperations) { Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>(); if (expectedAvailableOperations == OperationType.None || !Utils.tryGetValue(this.locationUnavailabilityInfoByEndpoint, endpoint, unavailabilityInfoHolder) || !unavailabilityInfoHolder.v.unavailableOperations.supports(expectedAvailableOperations)) { return false; } else { if (durationPassed(Instant.now(), unavailabilityInfoHolder.v.lastUnavailabilityCheckTimeStamp, this.unavailableLocationsExpirationTime)) { return false; } else { logger.debug( "Endpoint [{}] unavailable for operations [{}] present in unavailableEndpoints", endpoint, unavailabilityInfoHolder.v.unavailableOperations); return true; } } } private boolean anyEndpointsAvailable(List<URI> endpoints, OperationType expectedAvailableOperations) { Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>(); boolean anyEndpointsAvailable = false; for (URI endpoint : endpoints) { if (!isEndpointUnavailable(endpoint, expectedAvailableOperations)) { anyEndpointsAvailable = true; break; } } return anyEndpointsAvailable; } private void markEndpointUnavailable( URI unavailableEndpoint, OperationType unavailableOperationType) { Instant currentTime = Instant.now(); LocationUnavailabilityInfo updatedInfo = this.locationUnavailabilityInfoByEndpoint.compute( unavailableEndpoint, new BiFunction<URI, LocationUnavailabilityInfo, LocationUnavailabilityInfo>() { @Override public LocationUnavailabilityInfo apply(URI url, LocationUnavailabilityInfo info) { if (info == null) { return new LocationUnavailabilityInfo(currentTime, 
unavailableOperationType); } else { info.lastUnavailabilityCheckTimeStamp = currentTime; info.unavailableOperations = OperationType.combine(info.unavailableOperations, unavailableOperationType); return info; } } }); this.updateLocationCache(); logger.debug( "Endpoint [{}] unavailable for [{}] added/updated to unavailableEndpoints with timestamp [{}]", unavailableEndpoint, unavailableOperationType, updatedInfo.lastUnavailabilityCheckTimeStamp); } private void updateLocationCache(){ updateLocationCache(null, null, null, null); } private void updateLocationCache( Iterable<DatabaseAccountLocation> writeLocations, Iterable<DatabaseAccountLocation> readLocations, UnmodifiableList<String> preferenceList, Boolean enableMultipleWriteLocations) { synchronized (this.lockObject) { DatabaseAccountLocationsInfo nextLocationInfo = new DatabaseAccountLocationsInfo(this.locationInfo); logger.debug("updating location cache ..., current readLocations [{}], current writeLocations [{}]", nextLocationInfo.readEndpoints, nextLocationInfo.writeEndpoints); if (preferenceList != null) { nextLocationInfo.preferredLocations = preferenceList; } if (enableMultipleWriteLocations != null) { this.enableMultipleWriteLocations = enableMultipleWriteLocations; } this.clearStaleEndpointUnavailabilityInfo(); if (readLocations != null) { Utils.ValueHolder<UnmodifiableList<String>> out = Utils.ValueHolder.initialize(nextLocationInfo.availableReadLocations); Utils.ValueHolder<UnmodifiableMap<URI, String>> outReadRegionMap = Utils.ValueHolder.initialize(nextLocationInfo.regionNameByReadEndpoint); nextLocationInfo.availableReadEndpointByLocation = this.getEndpointByLocation(readLocations, out, outReadRegionMap); nextLocationInfo.availableReadLocations = out.v; nextLocationInfo.regionNameByReadEndpoint = outReadRegionMap.v; } if (writeLocations != null) { Utils.ValueHolder<UnmodifiableList<String>> out = Utils.ValueHolder.initialize(nextLocationInfo.availableWriteLocations); 
Utils.ValueHolder<UnmodifiableMap<URI, String>> outWriteRegionMap = Utils.ValueHolder.initialize(nextLocationInfo.regionNameByWriteEndpoint); nextLocationInfo.availableWriteEndpointByLocation = this.getEndpointByLocation(writeLocations, out, outWriteRegionMap); nextLocationInfo.availableWriteLocations = out.v; nextLocationInfo.regionNameByWriteEndpoint = outWriteRegionMap.v; } nextLocationInfo.writeEndpoints = this.getPreferredAvailableEndpoints(nextLocationInfo.availableWriteEndpointByLocation, nextLocationInfo.availableWriteLocations, OperationType.Write, this.defaultEndpoint); nextLocationInfo.readEndpoints = this.getPreferredAvailableEndpoints(nextLocationInfo.availableReadEndpointByLocation, nextLocationInfo.availableReadLocations, OperationType.Read, nextLocationInfo.writeEndpoints.get(0)); this.lastCacheUpdateTimestamp = Instant.now(); logger.debug("updating location cache finished, new readLocations [{}], new writeLocations [{}]", nextLocationInfo.readEndpoints, nextLocationInfo.writeEndpoints); this.locationInfo = nextLocationInfo; } } private UnmodifiableList<URI> getPreferredAvailableEndpoints(UnmodifiableMap<String, URI> endpointsByLocation, UnmodifiableList<String> orderedLocations, OperationType expectedAvailableOperation, URI fallbackEndpoint) { List<URI> endpoints = new ArrayList<>(); DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo; if (this.enableEndpointDiscovery) { if (this.canUseMultipleWriteLocations() || expectedAvailableOperation.supports(OperationType.Read)) { List<URI> unavailableEndpoints = new ArrayList<>(); if (currentLocationInfo.preferredLocations != null && !currentLocationInfo.preferredLocations.isEmpty()) { for (String location: currentLocationInfo.preferredLocations) { Utils.ValueHolder<URI> endpoint = new Utils.ValueHolder<>(); if (Utils.tryGetValue(endpointsByLocation, location, endpoint)) { if (this.isEndpointUnavailable(endpoint.v, expectedAvailableOperation)) { unavailableEndpoints.add(endpoint.v); } else 
{ endpoints.add(endpoint.v); } } } } else { for (String location : orderedLocations) { Utils.ValueHolder<URI> endpoint = Utils.ValueHolder.initialize(null); if (Utils.tryGetValue(endpointsByLocation, location, endpoint)) { if (this.defaultEndpoint.equals(endpoint.v)) { endpoints = new ArrayList<>(); break; } if (this.isEndpointUnavailable(endpoint.v, expectedAvailableOperation)) { unavailableEndpoints.add(endpoint.v); } else { endpoints.add(endpoint.v); } } } } if (endpoints.isEmpty()) { endpoints.add(fallbackEndpoint); } endpoints.addAll(unavailableEndpoints); } else { for (String location : orderedLocations) { Utils.ValueHolder<URI> endpoint = Utils.ValueHolder.initialize(null); if (!Strings.isNullOrEmpty(location) && Utils.tryGetValue(endpointsByLocation, location, endpoint)) { endpoints.add(endpoint.v); } } } } if (endpoints.isEmpty()) { endpoints.add(fallbackEndpoint); } return new UnmodifiableList<URI>(endpoints); } private UnmodifiableMap<String, URI> getEndpointByLocation(Iterable<DatabaseAccountLocation> locations, Utils.ValueHolder<UnmodifiableList<String>> orderedLocations, Utils.ValueHolder<UnmodifiableMap<URI, String>> regionMap) { Map<String, URI> endpointsByLocation = new CaseInsensitiveMap<>(); Map<URI, String> regionByEndpoint = new CaseInsensitiveMap<>(); List<String> parsedLocations = new ArrayList<>(); for (DatabaseAccountLocation location: locations) { if (!Strings.isNullOrEmpty(location.getName())) { try { URI endpoint = new URI(location.getEndpoint().toLowerCase(Locale.ROOT)); endpointsByLocation.put(location.getName().toLowerCase(Locale.ROOT), endpoint); regionByEndpoint.put(endpoint, location.getName().toLowerCase(Locale.ROOT)); parsedLocations.add(location.getName()); } catch (Exception e) { logger.warn("GetAvailableEndpointsByLocation() - skipping add for location = [{}] as it is location name is either empty or endpoint is malformed [{}]", location.getName(), location.getEndpoint()); } } } orderedLocations.v = new 
UnmodifiableList<String>(parsedLocations); regionMap.v = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(regionByEndpoint); return (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(endpointsByLocation); } public boolean canUseMultipleWriteLocations() { return this.useMultipleWriteLocations && this.enableMultipleWriteLocations; } public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) { return this.canUseMultipleWriteLocations() && (request.getResourceType() == ResourceType.Document || (request.getResourceType() == ResourceType.StoredProcedure && request.getOperationType() == com.azure.cosmos.implementation.OperationType.ExecuteJavaScript)); } private static class LocationUnavailabilityInfo { LocationUnavailabilityInfo(Instant instant, OperationType type) { this.lastUnavailabilityCheckTimeStamp = instant; this.unavailableOperations = type; } public Instant lastUnavailabilityCheckTimeStamp; public OperationType unavailableOperations; } private enum OperationType { None(0x0), Read(0x1), Write(0x2), ReadAndWrite(0x3); private final int flag; public boolean hasReadFlag() { return (flag & Read.flag) != 0; } public boolean hasWriteFlag() { return (flag & Write.flag) != 0; } public static OperationType combine(OperationType t1, OperationType t2) { switch (t1.flag | t2.flag) { case 0x0: return None; case 0x1: return Read; case 0x2: return Write; default: return ReadAndWrite; } } public boolean supports(OperationType type) { return (flag & type.flag) != 0; } OperationType(int flag) { this.flag = flag; } } private boolean durationPassed(Instant end, Instant start, Duration duration) { return end.minus(duration).isAfter(start); } private boolean unavailableLocationsExpirationTimePassed() { return durationPassed(Instant.now(), this.lastCacheUpdateTimestamp, this.unavailableLocationsExpirationTime); } private static boolean isExcludedRegionsSupplierConfigured(Supplier<CosmosExcludedRegions> 
excludedRegionsSupplier) { return excludedRegionsSupplier != null && excludedRegionsSupplier.get() != null; } static class DatabaseAccountLocationsInfo { private UnmodifiableList<String> preferredLocations; private UnmodifiableList<String> availableWriteLocations; private UnmodifiableList<String> availableReadLocations; private UnmodifiableMap<String, URI> availableWriteEndpointByLocation; private UnmodifiableMap<String, URI> availableReadEndpointByLocation; private UnmodifiableMap<URI, String> regionNameByWriteEndpoint; private UnmodifiableMap<URI, String> regionNameByReadEndpoint; private UnmodifiableList<URI> writeEndpoints; private UnmodifiableList<URI> readEndpoints; public DatabaseAccountLocationsInfo(List<String> preferredLocations, URI defaultEndpoint) { this.preferredLocations = new UnmodifiableList<>(preferredLocations.stream().map(loc -> loc.toLowerCase(Locale.ROOT)).collect(Collectors.toList())); this.availableWriteEndpointByLocation = (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(new CaseInsensitiveMap<>()); this.availableReadEndpointByLocation = (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(new CaseInsensitiveMap<>()); this.regionNameByWriteEndpoint = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(new CaseInsensitiveMap<>()); this.regionNameByReadEndpoint = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(new CaseInsensitiveMap<>()); this.availableReadLocations = new UnmodifiableList<>(Collections.emptyList()); this.availableWriteLocations = new UnmodifiableList<>(Collections.emptyList()); this.readEndpoints = new UnmodifiableList<>(Collections.singletonList(defaultEndpoint)); this.writeEndpoints = new UnmodifiableList<>(Collections.singletonList(defaultEndpoint)); } public DatabaseAccountLocationsInfo(DatabaseAccountLocationsInfo other) { this.preferredLocations = other.preferredLocations; this.availableWriteLocations = 
other.availableWriteLocations; this.availableReadLocations = other.availableReadLocations; this.availableWriteEndpointByLocation = other.availableWriteEndpointByLocation; this.regionNameByWriteEndpoint = other.regionNameByWriteEndpoint; this.regionNameByReadEndpoint = other.regionNameByReadEndpoint; this.availableReadEndpointByLocation = other.availableReadEndpointByLocation; this.writeEndpoints = other.writeEndpoints; this.readEndpoints = other.readEndpoints; } } }
/**
 * Caches the per-account region/endpoint topology and resolves requests to service endpoints.
 *
 * Thread-safety model (as visible in this class): all mutation of the topology happens in
 * {@link #updateLocationCache} under {@code lockObject}, which builds a fresh copy of
 * {@link DatabaseAccountLocationsInfo} and swaps the {@code locationInfo} reference at the end;
 * readers take an unsynchronized snapshot of that reference. Endpoint-unavailability bookkeeping
 * lives in a {@link ConcurrentHashMap} and is therefore safe to touch outside the lock.
 */
class LocationCache {
    private final static Logger logger = LoggerFactory.getLogger(LocationCache.class);

    private final boolean enableEndpointDiscovery;
    private final URI defaultEndpoint;
    private final boolean useMultipleWriteLocations;
    // Guards the build-and-swap of locationInfo in updateLocationCache.
    private final Object lockObject;
    // How long an endpoint stays in the "unavailable" set before it is considered stale.
    private final Duration unavailableLocationsExpirationTime;
    private final ConcurrentHashMap<URI, LocationUnavailabilityInfo> locationUnavailabilityInfoByEndpoint;
    private final ConnectionPolicy connectionPolicy;

    // Copy-on-write topology snapshot; replaced wholesale under lockObject, read without a lock.
    private DatabaseAccountLocationsInfo locationInfo;
    private Instant lastCacheUpdateTimestamp;
    // Set from the service-side DatabaseAccount flag in updateLocationCache.
    private boolean enableMultipleWriteLocations;

    /**
     * @param connectionPolicy client connection policy (preferred regions, discovery flags, excluded-regions supplier)
     * @param defaultEndpoint  the account's global endpoint, used as the fallback for all resolution
     * @param configs          source of the unavailable-location expiration interval
     */
    public LocationCache(
            ConnectionPolicy connectionPolicy,
            URI defaultEndpoint,
            Configs configs) {
        List<String> preferredLocations =
            new ArrayList<>(connectionPolicy.getPreferredRegions() != null ?
                connectionPolicy.getPreferredRegions() :
                Collections.emptyList()
            );

        this.locationInfo = new DatabaseAccountLocationsInfo(preferredLocations, defaultEndpoint);
        this.defaultEndpoint = defaultEndpoint;
        this.enableEndpointDiscovery = connectionPolicy.isEndpointDiscoveryEnabled();
        this.useMultipleWriteLocations = connectionPolicy.isMultipleWriteRegionsEnabled();

        this.lockObject = new Object();
        this.locationUnavailabilityInfoByEndpoint = new ConcurrentHashMap<>();
        // Instant.MIN means "never refreshed", so the first unavailability check triggers a refresh.
        this.lastCacheUpdateTimestamp = Instant.MIN;
        this.enableMultipleWriteLocations = false;
        this.unavailableLocationsExpirationTime =
            Duration.ofSeconds(configs.getUnavailableLocationsExpirationTimeInSeconds());
        this.connectionPolicy = connectionPolicy;
    }

    /**
     * Gets list of read endpoints ordered by
     *
     * 1. Preferred location
     * 2. Endpoint availability
     *
     * Triggers a lazy refresh first when any recorded unavailability info has expired.
     * @return ordered read endpoints
     */
    public UnmodifiableList<URI> getReadEndpoints() {
        if (this.locationUnavailabilityInfoByEndpoint.size() > 0
            && unavailableLocationsExpirationTimePassed()) {
            this.updateLocationCache();
        }

        return this.locationInfo.readEndpoints;
    }

    /**
     * Gets list of write endpoints ordered by
     * 1. Preferred location
     * 2. Endpoint availability
     *
     * Triggers a lazy refresh first when any recorded unavailability info has expired.
     * @return ordered write endpoints
     */
    public UnmodifiableList<URI> getWriteEndpoints() {
        if (this.locationUnavailabilityInfoByEndpoint.size() > 0
            && unavailableLocationsExpirationTimePassed()) {
            this.updateLocationCache();
        }

        return this.locationInfo.writeEndpoints;
    }

    /***
     * Get the list of available read endpoints.
     * The list will not be filtered by preferred region list.
     *
     * This method is ONLY used for fault injection.
     * @return all known read endpoints
     */
    public List<URI> getAvailableReadEndpoints() {
        return this.locationInfo.availableReadEndpointByLocation.values().stream().collect(Collectors.toList());
    }

    /***
     * Get the list of available write endpoints.
     * The list will not be filtered by preferred region list.
     *
     * This method is ONLY used for fault injection.
     * @return all known write endpoints
     */
    public List<URI> getAvailableWriteEndpoints() {
        return this.locationInfo.availableWriteEndpointByLocation.values().stream().collect(Collectors.toList());
    }

    /**
     * Returns the preferred-region list derived from the account topology when the caller
     * configured none.
     *
     * NOTE(review): updateLocationCache only populates this when the default endpoint's region
     * cannot be resolved from regionNameByReadEndpoint; otherwise it stays an empty list —
     * verify callers handle the empty case.
     */
    public List<String> getEffectivePreferredLocations() {
        return this.locationInfo.effectivePreferredLocations;
    }

    /**
     * Marks the current location unavailable for read
     */
    public void markEndpointUnavailableForRead(URI endpoint) {
        this.markEndpointUnavailable(endpoint, OperationType.Read);
    }

    /**
     * Marks the current location unavailable for write
     */
    public void markEndpointUnavailableForWrite(URI endpoint) {
        this.markEndpointUnavailable(endpoint, OperationType.Write);
    }

    /**
     * Invoked when {@link DatabaseAccount} is read
     * @param databaseAccount READ DatabaseAccount
     */
    public void onDatabaseAccountRead(DatabaseAccount databaseAccount) {
        this.updateLocationCache(
            databaseAccount.getWritableLocations(),
            databaseAccount.getReadableLocations(),
            null,
            BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
    }

    // Re-derives endpoint ordering for a new preferred-locations list.
    void onLocationPreferenceChanged(UnmodifiableList<String> preferredLocations) {
        this.updateLocationCache(
            null, null , preferredLocations, null);
    }

    /**
     * Resolves request to service endpoint.
     * 1. If this is a write request
     *    (a) If UseMultipleWriteLocations = true
     *        (i) For document writes, resolve to most preferred and available write endpoint.
     *            Once the endpoint is marked unavailable, it is moved to the end of available write endpoint. Current request will
     *            be retried on next preferred available write endpoint.
     *        (ii) For all other resources, always resolve to first/second (regardless of preferred locations)
     *             write getEndpoint in {@link DatabaseAccount}.
     *             Endpoint of first write location in {@link DatabaseAccount} is the only getEndpoint that supports
     *             write operation on all resource types (except during that region's failover).
     *             Only during manual failover, client would retry write on second write location in {@link DatabaseAccount}.
     *    (b) Else resolve the request to first write getEndpoint in {@link DatabaseAccount} OR
     *        second write getEndpoint in {@link DatabaseAccount} in case of manual failover of that location.
     * 2. Else resolve the request to most preferred available read getEndpoint (getAutomatic failover for read requests)
     * @param request Request for which getEndpoint is to be resolved
     * @return Resolved getEndpoint
     */
    public URI resolveServiceEndpoint(RxDocumentServiceRequest request) {
        Objects.requireNonNull(request.requestContext, "RxDocumentServiceRequest.requestContext is required and cannot be null.");
        // An explicitly pinned endpoint (e.g. by a retry policy) always wins.
        if(request.requestContext.locationEndpointToRoute != null) {
            return request.requestContext.locationEndpointToRoute;
        }

        int locationIndex = Utils.getValueOrDefault(request.requestContext.locationIndexToRoute, 0);

        boolean usePreferredLocations = request.requestContext.usePreferredLocations != null
            ? request.requestContext.usePreferredLocations : true;

        if(!usePreferredLocations
            || (request.getOperationType().isWriteOperation() && !this.canUseMultipleWriteLocations(request))) {
            // Single-master write path (or preferred locations disabled): only the first/second
            // write region is eligible, hence the index is folded modulo 2.
            DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo;
            if(this.enableEndpointDiscovery && currentLocationInfo.availableWriteLocations.size() > 0) {
                locationIndex = Math.min(locationIndex%2, currentLocationInfo.availableWriteLocations.size()-1);
                String writeLocation = currentLocationInfo.availableWriteLocations.get(locationIndex);
                return currentLocationInfo.availableWriteEndpointByLocation.get(writeLocation);
            } else {
                return this.defaultEndpoint;
            }
        } else {
            // Preferred-location routing: pick the index-th applicable endpoint, wrapping around.
            UnmodifiableList<URI> endpoints =
                request.getOperationType().isWriteOperation()?
                    this.getApplicableWriteEndpoints(request) : this.getApplicableReadEndpoints(request);
            return endpoints.get(locationIndex % endpoints.size());
        }
    }

    // Convenience overload pulling exclusions and per-partition unavailable regions off the request.
    public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) {
        return this.getApplicableWriteEndpoints(request.requestContext.getExcludeRegions(), request.requestContext.getUnavailableRegionsForPartition());
    }

    /**
     * Write endpoints with excluded regions (client-level supplier, overridden by request-level
     * list when present) and per-partition-unavailable regions filtered out.
     * Falls back to the default endpoint when filtering removes everything.
     */
    public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegionsOnRequest, List<String> unavailableRegionsForPartition) {

        UnmodifiableList<URI> writeEndpoints = this.getWriteEndpoints();
        Supplier<CosmosExcludedRegions> excludedRegionsSupplier = this.connectionPolicy.getExcludedRegionsSupplier();

        List<String> effectiveExcludedRegions = isExcludedRegionsSupplierConfigured(excludedRegionsSupplier) ?
            new ArrayList<>(excludedRegionsSupplier.get().getExcludedRegions()) : Collections.emptyList();

        // Fast path: nothing to filter.
        if (!isExcludeRegionsConfigured(excludedRegionsOnRequest, effectiveExcludedRegions)
            && (unavailableRegionsForPartition == null || unavailableRegionsForPartition.isEmpty())) {
            return writeEndpoints;
        }

        // Request-level exclusions replace (not merge with) the client-level ones.
        if (excludedRegionsOnRequest != null && !excludedRegionsOnRequest.isEmpty()) {
            effectiveExcludedRegions = excludedRegionsOnRequest;
        }

        List<String> effectiveExcludedRegionsWithPartitionUnavailableRegions = new ArrayList<>(effectiveExcludedRegions);

        if (unavailableRegionsForPartition != null) {
            effectiveExcludedRegionsWithPartitionUnavailableRegions.addAll(unavailableRegionsForPartition);
        }

        return this.getApplicableEndpoints(
            writeEndpoints,
            this.locationInfo.regionNameByWriteEndpoint,
            this.defaultEndpoint,
            effectiveExcludedRegionsWithPartitionUnavailableRegions);
    }

    // Convenience overload pulling exclusions and per-partition unavailable regions off the request.
    public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) {
        return this.getApplicableReadEndpoints(request.requestContext.getExcludeRegions(), request.requestContext.getUnavailableRegionsForPartition());
    }

    /**
     * Read endpoints with excluded regions and per-partition-unavailable regions filtered out.
     * Falls back to the first write endpoint (not the default endpoint) when filtering removes everything.
     */
    public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegionsOnRequest, List<String> unavailableRegionsForPartition) {

        UnmodifiableList<URI> readEndpoints = this.getReadEndpoints();
        Supplier<CosmosExcludedRegions> excludedRegionsSupplier = this.connectionPolicy.getExcludedRegionsSupplier();

        List<String> effectiveExcludedRegions = isExcludedRegionsSupplierConfigured(excludedRegionsSupplier) ?
            new ArrayList<>(excludedRegionsSupplier.get().getExcludedRegions()) : Collections.emptyList();

        if (!isExcludeRegionsConfigured(excludedRegionsOnRequest, effectiveExcludedRegions)
            && (unavailableRegionsForPartition == null || unavailableRegionsForPartition.isEmpty())) {
            return readEndpoints;
        }

        if (excludedRegionsOnRequest != null && !excludedRegionsOnRequest.isEmpty()) {
            effectiveExcludedRegions = excludedRegionsOnRequest;
        }

        List<String> effectiveExcludedRegionsWithPartitionUnavailableRegions = new ArrayList<>(effectiveExcludedRegions);

        if (unavailableRegionsForPartition != null) {
            effectiveExcludedRegionsWithPartitionUnavailableRegions.addAll(unavailableRegionsForPartition);
        }

        return this.getApplicableEndpoints(
            readEndpoints,
            this.locationInfo.regionNameByReadEndpoint,
            this.locationInfo.writeEndpoints.get(0),
            effectiveExcludedRegionsWithPartitionUnavailableRegions);
    }

    /**
     * Filters {@code endpoints} down to those whose region (looked up in {@code regionNameByEndpoint})
     * is not in {@code excludeRegionList} (case-insensitive). Endpoints with no known region are
     * dropped. Returns a singleton list of {@code fallbackEndpoint} if everything was filtered out.
     */
    private UnmodifiableList<URI> getApplicableEndpoints(
        UnmodifiableList<URI> endpoints,
        UnmodifiableMap<URI, String> regionNameByEndpoint,
        URI fallbackEndpoint,
        List<String> excludeRegionList) {

        List<URI> applicableEndpoints = new ArrayList<>();

        for (URI endpoint : endpoints) {
            Utils.ValueHolder<String> regionName = new Utils.ValueHolder<>();
            if (Utils.tryGetValue(regionNameByEndpoint, endpoint, regionName)) {
                if (!excludeRegionList.stream().anyMatch(regionName.v::equalsIgnoreCase)) {
                    applicableEndpoints.add(endpoint);
                }
            }
        }

        if (applicableEndpoints.isEmpty()) {
            applicableEndpoints.add(fallbackEndpoint);
        }

        return new UnmodifiableList<>(applicableEndpoints);
    }

    // True when either the request-level or the client-level excluded-regions list is non-empty.
    private boolean isExcludeRegionsConfigured(List<String> excludedRegionsOnRequest, List<String> excludedRegionsOnClient) {
        boolean isExcludedRegionsConfiguredOnRequest = !(excludedRegionsOnRequest == null || excludedRegionsOnRequest.isEmpty());
        boolean isExcludedRegionsConfiguredOnClient = !(excludedRegionsOnClient == null || excludedRegionsOnClient.isEmpty());

        return isExcludedRegionsConfiguredOnRequest || isExcludedRegionsConfiguredOnClient;
    }

    /**
     * Resolves a region name to its endpoint for fault-injection scenarios.
     * @param region    region name (key into the case-insensitive endpoint-by-location maps)
     * @param writeOnly look up in the write map only; otherwise the read map
     * @throws IllegalArgumentException when the region is unknown
     */
    public URI resolveFaultInjectionEndpoint(String region, boolean writeOnly) {
        Utils.ValueHolder<URI> endpointValueHolder = new Utils.ValueHolder<>();
        if (writeOnly) {
            Utils.tryGetValue(this.locationInfo.availableWriteEndpointByLocation, region, endpointValueHolder);
        } else {
            Utils.tryGetValue(this.locationInfo.availableReadEndpointByLocation, region, endpointValueHolder);
        }

        if (endpointValueHolder.v != null) {
            return endpointValueHolder.v;
        }

        throw new IllegalArgumentException("Can not find service endpoint for region " + region);
    }

    public URI getDefaultEndpoint() {
        return this.defaultEndpoint;
    }

    /**
     * Maps an endpoint back to its lowercase region name for the given operation type.
     * Falls back to the first available write location when the endpoint is unknown —
     * NOTE(review): this fallback throws IndexOutOfBoundsException if availableWriteLocations
     * is empty (e.g. before the first account read); confirm callers only hit this after init.
     */
    public String getRegionName(URI locationEndpoint, com.azure.cosmos.implementation.OperationType operationType) {
        Utils.ValueHolder<String> regionName = new Utils.ValueHolder<>();
        if (operationType.isWriteOperation()) {
            if (Utils.tryGetValue(this.locationInfo.regionNameByWriteEndpoint, locationEndpoint, regionName)) {
                return regionName.v;
            }
        } else {
            if (Utils.tryGetValue(this.locationInfo.regionNameByReadEndpoint, locationEndpoint, regionName)) {
                return regionName.v;
            }
        }

        return this.locationInfo.availableWriteLocations.get(0).toLowerCase(Locale.ROOT);
    }

    // Currently unused within the visible chunk; kept for parity with other SDK implementations.
    private boolean areEqual(URI url1, URI url2) {
        return url1.equals(url2);
    }

    /**
     * Removes unavailability entries older than unavailableLocationsExpirationTime.
     * The re-read via tryGetValue plus tryRemove tolerates concurrent updates to the map.
     */
    private void clearStaleEndpointUnavailabilityInfo() {
        if (!this.locationUnavailabilityInfoByEndpoint.isEmpty()) {
            List<URI> unavailableEndpoints = new ArrayList<>(this.locationUnavailabilityInfoByEndpoint.keySet());

            for (URI unavailableEndpoint: unavailableEndpoints) {
                Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>();
                Utils.ValueHolder<LocationUnavailabilityInfo> removedHolder = new Utils.ValueHolder<>();

                if (Utils.tryGetValue(this.locationUnavailabilityInfoByEndpoint, unavailableEndpoint, unavailabilityInfoHolder) &&
                    durationPassed(Instant.now(), unavailabilityInfoHolder.v.lastUnavailabilityCheckTimeStamp, this.unavailableLocationsExpirationTime) &&
                    Utils.tryRemove(this.locationUnavailabilityInfoByEndpoint, unavailableEndpoint, removedHolder)) {
                    logger.debug(
                        "Removed endpoint [{}] unavailable for operations [{}] from unavailableEndpoints",
                        unavailableEndpoint,
                        unavailabilityInfoHolder.v.unavailableOperations);
                }
            }
        }
    }

    /**
     * True when {@code endpoint} has an unexpired unavailability record covering
     * {@code expectedAvailableOperations}. OperationType.None never matches.
     */
    private boolean isEndpointUnavailable(URI endpoint, OperationType expectedAvailableOperations) {
        Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>();

        if (expectedAvailableOperations == OperationType.None
            || !Utils.tryGetValue(this.locationUnavailabilityInfoByEndpoint, endpoint, unavailabilityInfoHolder)
            || !unavailabilityInfoHolder.v.unavailableOperations.supports(expectedAvailableOperations)) {
            return false;
        } else {
            // An expired record counts as available again (it is cleaned up lazily elsewhere).
            if (durationPassed(Instant.now(), unavailabilityInfoHolder.v.lastUnavailabilityCheckTimeStamp, this.unavailableLocationsExpirationTime)) {
                return false;
            } else {
                logger.debug(
                    "Endpoint [{}] unavailable for operations [{}] present in unavailableEndpoints",
                    endpoint,
                    unavailabilityInfoHolder.v.unavailableOperations);
                // Unexpired entry "unavailable" info and it is for the same operation type
                return true;
            }
        }
    }

    // True when at least one endpoint in the list is not marked unavailable for the operation.
    private boolean anyEndpointsAvailable(List<URI> endpoints, OperationType expectedAvailableOperations) {
        // NOTE(review): this holder is never used in the loop below; candidate for removal.
        Utils.ValueHolder<LocationUnavailabilityInfo> unavailabilityInfoHolder = new Utils.ValueHolder<>();
        boolean anyEndpointsAvailable = false;
        for (URI endpoint : endpoints) {
            if (!isEndpointUnavailable(endpoint, expectedAvailableOperations)) {
                anyEndpointsAvailable = true;
                break;
            }
        }
        return anyEndpointsAvailable;
    }

    /**
     * Records (or extends) an unavailability entry for the endpoint and immediately rebuilds the
     * endpoint ordering so the endpoint moves to the back of the preference lists.
     * ConcurrentHashMap.compute makes the record-update atomic per endpoint.
     */
    private void markEndpointUnavailable(
        URI unavailableEndpoint,
        OperationType unavailableOperationType) {
        Instant currentTime = Instant.now();
        LocationUnavailabilityInfo updatedInfo = this.locationUnavailabilityInfoByEndpoint.compute(
            unavailableEndpoint,
            new BiFunction<URI, LocationUnavailabilityInfo, LocationUnavailabilityInfo>() {
                @Override
                public LocationUnavailabilityInfo apply(URI url, LocationUnavailabilityInfo info) {

                    if (info == null) {
                        // No record exists yet for this endpoint: start one.
                        return new LocationUnavailabilityInfo(currentTime, unavailableOperationType);
                    } else {
                        // Refresh the timestamp and widen the set of unavailable operations.
                        info.lastUnavailabilityCheckTimeStamp = currentTime;
                        info.unavailableOperations = OperationType.combine(info.unavailableOperations, unavailableOperationType);
                        return info;
                    }

                }
            });

        this.updateLocationCache();

        logger.debug(
            "Endpoint [{}] unavailable for [{}] added/updated to unavailableEndpoints with timestamp [{}]",
            unavailableEndpoint,
            unavailableOperationType,
            updatedInfo.lastUnavailabilityCheckTimeStamp);
    }

    // Rebuild the ordering without changing topology/preferences/write-mode.
    private void updateLocationCache(){
        updateLocationCache(null, null, null, null);
    }

    /**
     * Rebuilds the cached topology snapshot. Builds a copy of the current
     * DatabaseAccountLocationsInfo, applies the non-null inputs, recomputes the ordered
     * read/write endpoint lists, then publishes the copy by swapping the locationInfo
     * reference — all under lockObject so concurrent updates cannot interleave.
     *
     * @param writeLocations               new writable locations from the DatabaseAccount, or null to keep current
     * @param readLocations                new readable locations from the DatabaseAccount, or null to keep current
     * @param preferenceList               new preferred-locations list, or null to keep current
     * @param enableMultipleWriteLocations service-side multi-write flag, or null to keep current
     */
    private void updateLocationCache(
        Iterable<DatabaseAccountLocation> writeLocations,
        Iterable<DatabaseAccountLocation> readLocations,
        UnmodifiableList<String> preferenceList,
        Boolean enableMultipleWriteLocations) {
        synchronized (this.lockObject) {
            DatabaseAccountLocationsInfo nextLocationInfo = new DatabaseAccountLocationsInfo(this.locationInfo);

            logger.debug("updating location cache ..., current readLocations [{}], current writeLocations [{}]",
                nextLocationInfo.readEndpoints, nextLocationInfo.writeEndpoints);

            if (preferenceList != null) {
                nextLocationInfo.preferredLocations = preferenceList;
            }

            if (enableMultipleWriteLocations != null) {
                this.enableMultipleWriteLocations = enableMultipleWriteLocations;
            }

            this.clearStaleEndpointUnavailabilityInfo();

            if (readLocations != null) {
                Utils.ValueHolder<UnmodifiableList<String>> out = Utils.ValueHolder.initialize(nextLocationInfo.availableReadLocations);
                Utils.ValueHolder<UnmodifiableMap<URI, String>> outReadRegionMap = Utils.ValueHolder.initialize(nextLocationInfo.regionNameByReadEndpoint);
                nextLocationInfo.availableReadEndpointByLocation = this.getEndpointByLocation(readLocations, out, outReadRegionMap);
                nextLocationInfo.availableReadLocations = out.v;
                nextLocationInfo.regionNameByReadEndpoint = outReadRegionMap.v;
            }

            if (writeLocations != null) {
                Utils.ValueHolder<UnmodifiableList<String>> out = Utils.ValueHolder.initialize(nextLocationInfo.availableWriteLocations);
                Utils.ValueHolder<UnmodifiableMap<URI, String>> outWriteRegionMap = Utils.ValueHolder.initialize(nextLocationInfo.regionNameByWriteEndpoint);
                nextLocationInfo.availableWriteEndpointByLocation = this.getEndpointByLocation(writeLocations, out, outWriteRegionMap);
                nextLocationInfo.availableWriteLocations = out.v;
                nextLocationInfo.regionNameByWriteEndpoint = outWriteRegionMap.v;
            }

            // Order write endpoints first: the first write endpoint is the read-side fallback.
            nextLocationInfo.writeEndpoints = this.getPreferredAvailableEndpoints(nextLocationInfo.availableWriteEndpointByLocation, nextLocationInfo.availableWriteLocations, OperationType.Write, this.defaultEndpoint);
            nextLocationInfo.readEndpoints = this.getPreferredAvailableEndpoints(nextLocationInfo.availableReadEndpointByLocation, nextLocationInfo.availableReadLocations, OperationType.Read, nextLocationInfo.writeEndpoints.get(0));

            // Derive effective preferred locations only when the user configured none and the
            // default (global) endpoint cannot be mapped to a concrete region.
            if (nextLocationInfo.preferredLocations == null || nextLocationInfo.preferredLocations.isEmpty()) {
                Utils.ValueHolder<String> regionForDefaultEndpoint = new Utils.ValueHolder<>();

                if (!Utils.tryGetValue(nextLocationInfo.regionNameByReadEndpoint, this.defaultEndpoint, regionForDefaultEndpoint)) {
                    nextLocationInfo.effectivePreferredLocations = nextLocationInfo.availableReadLocations;
                }
            }

            this.lastCacheUpdateTimestamp = Instant.now();

            logger.debug("updating location cache finished, new readLocations [{}], new writeLocations [{}]",
                nextLocationInfo.readEndpoints, nextLocationInfo.writeEndpoints);
            // Publish the new snapshot last so readers never see a half-built topology.
            this.locationInfo = nextLocationInfo;
        }
    }

    /**
     * Orders endpoints by preference and availability.
     * With endpoint discovery on and multi-write (or a read operation): preferred (or ordered)
     * locations first, endpoints currently marked unavailable appended at the end, fallback
     * inserted when no available endpoint remains. Otherwise: plain account order.
     */
    private UnmodifiableList<URI> getPreferredAvailableEndpoints(UnmodifiableMap<String, URI> endpointsByLocation,
                                                                 UnmodifiableList<String> orderedLocations,
                                                                 OperationType expectedAvailableOperation,
                                                                 URI fallbackEndpoint) {
        List<URI> endpoints = new ArrayList<>();
        DatabaseAccountLocationsInfo currentLocationInfo = this.locationInfo;

        if (this.enableEndpointDiscovery) {
            if (this.canUseMultipleWriteLocations() || expectedAvailableOperation.supports(OperationType.Read)) {
                List<URI> unavailableEndpoints = new ArrayList<>();

                if (currentLocationInfo.preferredLocations != null && !currentLocationInfo.preferredLocations.isEmpty()) {
                    // Walk preferred locations in user order, partitioning into available/unavailable.
                    for (String location: currentLocationInfo.preferredLocations) {
                        Utils.ValueHolder<URI> endpoint = new Utils.ValueHolder<>();
                        if (Utils.tryGetValue(endpointsByLocation, location, endpoint)) {
                            if (this.isEndpointUnavailable(endpoint.v, expectedAvailableOperation)) {
                                unavailableEndpoints.add(endpoint.v);
                            } else {
                                endpoints.add(endpoint.v);
                            }
                        }
                    }
                } else {
                    for (String location : orderedLocations) {
                        Utils.ValueHolder<URI> endpoint = Utils.ValueHolder.initialize(null);
                        if (Utils.tryGetValue(endpointsByLocation, location, endpoint)) {
                            // If no preferred regions are set, and the default endpoint resolves
                            // to one of the regional endpoints, reset and fall through to the
                            // fallback below — NOTE(review): confirm this matches the intended
                            // "route everything through the global endpoint" behavior.
                            if (this.defaultEndpoint.equals(endpoint.v)) {
                                endpoints = new ArrayList<>();
                                break;
                            }

                            if (this.isEndpointUnavailable(endpoint.v, expectedAvailableOperation)) {
                                unavailableEndpoints.add(endpoint.v);
                            } else {
                                endpoints.add(endpoint.v);
                            }
                        }
                    }
                }

                if (endpoints.isEmpty()) {
                    endpoints.add(fallbackEndpoint);
                }

                // Unavailable endpoints stay routable, just at the lowest priority.
                endpoints.addAll(unavailableEndpoints);
            } else {
                for (String location : orderedLocations) {
                    Utils.ValueHolder<URI> endpoint = Utils.ValueHolder.initialize(null);
                    if (!Strings.isNullOrEmpty(location) && // location is empty during manual failover
                        Utils.tryGetValue(endpointsByLocation, location, endpoint)) {
                        endpoints.add(endpoint.v);
                    }
                }
            }
        }

        if (endpoints.isEmpty()) {
            endpoints.add(fallbackEndpoint);
        }

        return new UnmodifiableList<URI>(endpoints);
    }

    /**
     * Parses DatabaseAccountLocation entries into three views: location-name -> endpoint (returned),
     * endpoint -> location-name ({@code regionMap} out-param), and the ordered location-name list
     * ({@code orderedLocations} out-param). Entries with empty names or malformed endpoint URIs
     * are skipped with a warning. Keys are lowercased; maps are case-insensitive.
     */
    private UnmodifiableMap<String, URI> getEndpointByLocation(Iterable<DatabaseAccountLocation> locations,
                                                               Utils.ValueHolder<UnmodifiableList<String>> orderedLocations,
                                                               Utils.ValueHolder<UnmodifiableMap<URI, String>> regionMap) {
        Map<String, URI> endpointsByLocation = new CaseInsensitiveMap<>();
        Map<URI, String> regionByEndpoint = new CaseInsensitiveMap<>();
        List<String> parsedLocations = new ArrayList<>();

        for (DatabaseAccountLocation location: locations) {
            if (!Strings.isNullOrEmpty(location.getName())) {
                try {
                    URI endpoint = new URI(location.getEndpoint().toLowerCase(Locale.ROOT));
                    endpointsByLocation.put(location.getName().toLowerCase(Locale.ROOT), endpoint);
                    regionByEndpoint.put(endpoint, location.getName().toLowerCase(Locale.ROOT));
                    parsedLocations.add(location.getName());
                } catch (Exception e) {
                    logger.warn("GetAvailableEndpointsByLocation() - skipping add for location = [{}] as it is location name is either empty or endpoint is malformed [{}]",
                        location.getName(), location.getEndpoint());
                }
            }
        }

        orderedLocations.v = new UnmodifiableList<String>(parsedLocations);
        regionMap.v = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(regionByEndpoint);

        return (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(endpointsByLocation);
    }

    // True only when BOTH the client policy and the service account enable multi-write.
    public boolean canUseMultipleWriteLocations() {
        return this.useMultipleWriteLocations && this.enableMultipleWriteLocations;
    }

    // Multi-write applies only to document operations and stored-procedure execution.
    public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) {
        return this.canUseMultipleWriteLocations() &&
            (request.getResourceType() == ResourceType.Document ||
                (request.getResourceType() == ResourceType.StoredProcedure &&
                    request.getOperationType() == com.azure.cosmos.implementation.OperationType.ExecuteJavaScript));
    }

    // Mutable per-endpoint record: when it was last marked unavailable, and for which operations.
    private static class LocationUnavailabilityInfo {
        LocationUnavailabilityInfo(Instant instant, OperationType type) {
            this.lastUnavailabilityCheckTimeStamp = instant;
            this.unavailableOperations = type;
        }

        public Instant lastUnavailabilityCheckTimeStamp;
        public OperationType unavailableOperations;
    }

    // Bit-flag operation kinds; combine() ORs flags, supports() tests overlap.
    private enum OperationType {
        None(0x0),
        Read(0x1),
        Write(0x2),
        ReadAndWrite(0x3);

        private final int flag;

        public boolean hasReadFlag() {
            return (flag & Read.flag) != 0;
        }

        public boolean hasWriteFlag() {
            return (flag & Write.flag) != 0;
        }

        public static OperationType combine(OperationType t1, OperationType t2) {
            switch (t1.flag | t2.flag) {
                case 0x0:
                    return None;
                case 0x1:
                    return Read;
                case 0x2:
                    return Write;
                default:
                    return ReadAndWrite;
            }
        }

        public boolean supports(OperationType type) {
            return (flag & type.flag) != 0;
        }

        OperationType(int flag) {
            this.flag = flag;
        }
    }

    // True when "end" is more than "duration" after "start".
    private boolean durationPassed(Instant end, Instant start, Duration duration) {
        return end.minus(duration).isAfter(start);
    }

    private boolean unavailableLocationsExpirationTimePassed() {
        return durationPassed(Instant.now(), this.lastCacheUpdateTimestamp, this.unavailableLocationsExpirationTime);
    }

    // Supplier is "configured" only when it exists AND currently yields a non-null value.
    private static boolean isExcludedRegionsSupplierConfigured(Supplier<CosmosExcludedRegions> excludedRegionsSupplier) {
        return excludedRegionsSupplier != null && excludedRegionsSupplier.get() != null;
    }

    /**
     * Immutable-by-convention snapshot of the account topology. Instances are built/mutated only
     * inside updateLocationCache (under the lock) and then published by reference swap; the copy
     * constructor shares the contained unmodifiable collections, which is safe because they are
     * never mutated in place.
     */
    static class DatabaseAccountLocationsInfo {
        private UnmodifiableList<String> preferredLocations;
        // Preferred locations derived from the account when the user configured none.
        private UnmodifiableList<String> effectivePreferredLocations;
        // lower-case region
        private UnmodifiableList<String> availableWriteLocations;
        // lower-case region
        private UnmodifiableList<String> availableReadLocations;
        private UnmodifiableMap<String, URI> availableWriteEndpointByLocation;
        private UnmodifiableMap<String, URI> availableReadEndpointByLocation;
        private UnmodifiableMap<URI, String> regionNameByWriteEndpoint;
        private UnmodifiableMap<URI, String> regionNameByReadEndpoint;
        private UnmodifiableList<URI> writeEndpoints;
        private UnmodifiableList<URI> readEndpoints;

        public DatabaseAccountLocationsInfo(List<String> preferredLocations,
                                            URI defaultEndpoint) {
            this.preferredLocations =
                new UnmodifiableList<>(preferredLocations.stream().map(loc -> loc.toLowerCase(Locale.ROOT)).collect(Collectors.toList()));
            this.effectivePreferredLocations = new UnmodifiableList<>(Collections.emptyList());
            this.availableWriteEndpointByLocation
                = (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(new CaseInsensitiveMap<>());
            this.availableReadEndpointByLocation
                = (UnmodifiableMap<String, URI>) UnmodifiableMap.<String, URI>unmodifiableMap(new CaseInsensitiveMap<>());
            this.regionNameByWriteEndpoint
                = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(new CaseInsensitiveMap<>());
            this.regionNameByReadEndpoint
                = (UnmodifiableMap<URI, String>) UnmodifiableMap.<URI, String>unmodifiableMap(new CaseInsensitiveMap<>());
            this.availableReadLocations = new UnmodifiableList<>(Collections.emptyList());
            this.availableWriteLocations = new UnmodifiableList<>(Collections.emptyList());
            // Until the first account read, both lists contain only the default (global) endpoint.
            this.readEndpoints = new UnmodifiableList<>(Collections.singletonList(defaultEndpoint));
            this.writeEndpoints = new UnmodifiableList<>(Collections.singletonList(defaultEndpoint));
        }

        // Shallow copy; shared collections are unmodifiable so sharing is safe.
        public DatabaseAccountLocationsInfo(DatabaseAccountLocationsInfo other) {
            this.preferredLocations = other.preferredLocations;
            this.effectivePreferredLocations = other.effectivePreferredLocations;
            this.availableWriteLocations = other.availableWriteLocations;
            this.availableReadLocations = other.availableReadLocations;
            this.availableWriteEndpointByLocation = other.availableWriteEndpointByLocation;
            this.regionNameByWriteEndpoint = other.regionNameByWriteEndpoint;
            this.regionNameByReadEndpoint = other.regionNameByReadEndpoint;
            this.availableReadEndpointByLocation = other.availableReadEndpointByLocation;
            this.writeEndpoints = other.writeEndpoints;
            this.readEndpoints = other.readEndpoints;
        }
    }
}
How is atomicity ensured here? Wouldn't you need to hold at least a read lock so that this.latestDatabaseAccount and the locationCache state are read atomically (i.e. so a concurrent account refresh can't swap one but not the other between the two reads)?
/**
 * Returns the effective preferred regions for routing.
 *
 * Precedence: the user-configured preferred regions (immutable after client construction,
 * so safe to read without synchronization) win; otherwise the list derived from the
 * account topology; an empty list before the first successful account read.
 *
 * @return preferred regions, never null (empty list when nothing is known yet)
 */
private List<String> getEffectivePreferredRegions() {
    if (this.connectionPolicy.getPreferredRegions() != null && !this.connectionPolicy.getPreferredRegions().isEmpty()) {
        return this.connectionPolicy.getPreferredRegions();
    }

    // FIX: latestDatabaseAccount and the locationCache-derived state are updated together
    // under the database-account write lock; reading them without the matching read lock
    // is not atomic — a concurrent refresh could swap one but not the other between the
    // null-check and the cache read. Take the read lock so both reads see one snapshot.
    this.databaseAccountReadLock.lock();
    try {
        if (this.latestDatabaseAccount == null) {
            return Collections.emptyList();
        }

        return this.locationCache.getEffectivePreferredLocations();
    } finally {
        this.databaseAccountReadLock.unlock();
    }
}
/**
 * Returns the effective preferred regions for routing.
 *
 * Precedence: the user-configured preferred regions (immutable after client construction,
 * so safe to read without synchronization) win; otherwise the list derived from the
 * account topology; an empty list before the first successful account read.
 *
 * @return preferred regions, never null (empty list when nothing is known yet)
 */
private List<String> getEffectivePreferredRegions() {

    if (this.connectionPolicy.getPreferredRegions() != null && !this.connectionPolicy.getPreferredRegions().isEmpty()) {
        return this.connectionPolicy.getPreferredRegions();
    }

    // The read lock makes the latestDatabaseAccount null-check and the subsequent
    // locationCache read atomic with respect to a concurrent account refresh
    // (which updates both under the corresponding write lock).
    this.databaseAccountReadLock.lock();

    try {
        if (this.latestDatabaseAccount == null) {
            return Collections.emptyList();
        }

        return this.locationCache.getEffectivePreferredLocations();
    } finally {
        this.databaseAccountReadLock.unlock();
    }
}
class GlobalEndpointManager implements AutoCloseable { private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private static final CosmosDaemonThreadFactory theadFactory = new CosmosDaemonThreadFactory("cosmos-global-endpoint-mgr"); private final int backgroundRefreshLocationTimeIntervalInMS; private final LocationCache locationCache; private final URI defaultEndpoint; private final ConnectionPolicy connectionPolicy; private final Duration maxInitializationTime; private final DatabaseAccountManagerInternal owner; private final AtomicBoolean isRefreshing; private final AtomicBoolean refreshInBackground; private final Scheduler scheduler = Schedulers.newSingle(theadFactory); private volatile boolean isClosed; private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true); private volatile DatabaseAccount latestDatabaseAccount; private volatile Throwable latestDatabaseRefreshError; public void setLatestDatabaseRefreshError(Throwable latestDatabaseRefreshError) { this.latestDatabaseRefreshError = latestDatabaseRefreshError; } public Throwable getLatestDatabaseRefreshError() { return latestDatabaseRefreshError; } public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) { this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000; this.maxInitializationTime = Duration.ofSeconds(configs.getGlobalEndpointManagerMaxInitializationTimeInSeconds()); try { this.locationCache = new LocationCache( connectionPolicy, owner.getServiceEndpoint(), configs); this.owner = owner; this.defaultEndpoint = owner.getServiceEndpoint(); this.connectionPolicy = connectionPolicy; this.isRefreshing = new AtomicBoolean(false); this.refreshInBackground = new AtomicBoolean(false); this.isClosed = false; } catch (Exception e) { throw new IllegalArgumentException(e); } } public void init() { 
startRefreshLocationTimerAsync(true).block(maxInitializationTime); } public UnmodifiableList<URI> getReadEndpoints() { return this.locationCache.getReadEndpoints(); } public UnmodifiableList<URI> getWriteEndpoints() { return this.locationCache.getWriteEndpoints(); } public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableReadEndpoints(request); } public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableWriteEndpoints(request); } public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableReadEndpoints(excludedRegions, Collections.emptyList()); } public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableWriteEndpoints(excludedRegions, Collections.emptyList()); } public List<URI> getAvailableReadEndpoints() { return this.locationCache.getAvailableReadEndpoints(); } public List<URI> getAvailableWriteEndpoints() { return this.locationCache.getAvailableWriteEndpoints(); } public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync( URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) { return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume( e -> { logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage()); if (locations.isEmpty()) { return Mono.error(e); } Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size()) .map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux()); Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single(); return res.doOnError( innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage())); }); } public URI 
resolveServiceEndpoint(RxDocumentServiceRequest request) { URI serviceEndpoint = this.locationCache.resolveServiceEndpoint(request); if (request.faultInjectionRequestContext != null) { request.faultInjectionRequestContext.setLocationEndpointToRoute(serviceEndpoint); } return serviceEndpoint; } public URI resolveFaultInjectionServiceEndpoint(String region, boolean writeOnly) { return this.locationCache.resolveFaultInjectionEndpoint(region, writeOnly); } public URI getDefaultEndpoint() { return this.locationCache.getDefaultEndpoint(); } public void markEndpointUnavailableForRead(URI endpoint) { logger.debug("Marking endpoint {} unavailable for read",endpoint); this.locationCache.markEndpointUnavailableForRead(endpoint);; } public void markEndpointUnavailableForWrite(URI endpoint) { logger.debug("Marking endpoint {} unavailable for Write",endpoint); this.locationCache.markEndpointUnavailableForWrite(endpoint); } public boolean canUseMultipleWriteLocations() { return this.locationCache.canUseMultipleWriteLocations(); } public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) { return this.locationCache.canUseMultipleWriteLocations(request); } public void close() { this.isClosed = true; this.scheduler.dispose(); logger.debug("GlobalEndpointManager closed."); } public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) { return Mono.defer(() -> { logger.debug("refreshLocationAsync() invoked"); if (forceRefresh) { Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.getEffectivePreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.locationCache.onDatabaseAccountRead(dbAccount); return dbAccount; }).flatMap(dbAccount -> { return Mono.empty(); }); } if (!isRefreshing.compareAndSet(false, true)) { logger.debug("in the middle of another refresh. 
Not invoking a new refresh."); return Mono.empty(); } logger.debug("will refresh"); return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false)); }); } /** * This will provide the latest databaseAccount. * If due to some reason last databaseAccount update was null, * this method will return previous valid value * @return DatabaseAccount */ public DatabaseAccount getLatestDatabaseAccount() { return this.latestDatabaseAccount; } public int getPreferredLocationCount() { List<String> effectivePreferredRegions = this.getEffectivePreferredRegions(); return effectivePreferredRegions != null ? effectivePreferredRegions.size() : 0; } private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) { return Mono.defer(() -> { logger.debug("refreshLocationPrivateAsync() refreshing locations"); if (databaseAccount != null) { this.locationCache.onDatabaseAccountRead(databaseAccount); } Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>(); if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) { logger.debug("shouldRefreshEndpoints: true"); if (databaseAccount == null && !canRefreshInBackground.v) { logger.debug("shouldRefreshEndpoints: can't be done in background"); Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.getEffectivePreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.locationCache.onDatabaseAccountRead(dbAccount); this.isRefreshing.set(false); return dbAccount; }).flatMap(dbAccount -> { if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } return Mono.empty(); }); } if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } this.isRefreshing.set(false); return Mono.empty(); } else { logger.debug("shouldRefreshEndpoints: false, nothing to do."); this.isRefreshing.set(false); return Mono.empty(); } }); 
} private void startRefreshLocationTimerAsync() { startRefreshLocationTimerAsync(false).subscribe(); } private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) { if (this.isClosed) { logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed"); return Mono.empty(); } logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS); LocalDateTime now = LocalDateTime.now(); int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS; this.refreshInBackground.set(true); return Mono.delay(Duration.ofMillis(delayInMillis), CosmosSchedulers.COSMOS_PARALLEL) .flatMap( t -> { if (this.isClosed) { logger.info("client already closed"); return Mono.empty(); } logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now); Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint, new ArrayList<>(this.getEffectivePreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.flatMap(dbAccount -> { logger.info("db account retrieved {}", dbAccount); this.refreshInBackground.set(false); return this.refreshLocationPrivateAsync(dbAccount); }); }).onErrorResume(ex -> { logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. 
Exception: {}", ex.toString(), ex); this.setLatestDatabaseRefreshError(ex); this.startRefreshLocationTimerAsync(); return Mono.empty(); }).subscribeOn(scheduler); } private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) { return this.owner.getDatabaseAccountFromEndpoint(serviceEndpoint) .doOnNext(databaseAccount -> { if(databaseAccount != null) { this.latestDatabaseAccount = databaseAccount; this.setLatestDatabaseRefreshError(null); } logger.debug("account retrieved: {}", databaseAccount); }).single(); } public boolean isClosed() { return this.isClosed; } public String getRegionName(URI locationEndpoint, OperationType operationType) { return this.locationCache.getRegionName(locationEndpoint, operationType); } public ConnectionPolicy getConnectionPolicy() { return this.connectionPolicy; } }
/**
 * Maintains the account-level endpoint topology (regional read/write endpoints) in a
 * {@link LocationCache} and keeps it fresh by periodically re-reading the database
 * account metadata in the background.
 *
 * <p>Thread-safety: {@code latestDatabaseAccount} and the location cache are updated
 * together under {@code databaseAccountWriteLock}; readers that must observe the pair
 * consistently take {@code databaseAccountReadLock}. The {@code volatile} qualifier on
 * {@code latestDatabaseAccount} alone would give visibility but not atomicity across
 * the two fields.
 */
class GlobalEndpointManager implements AutoCloseable {
    private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class);
    private static final CosmosDaemonThreadFactory threadFactory =
        new CosmosDaemonThreadFactory("cosmos-global-endpoint-mgr");

    // Background refresh cadence, derived from the unavailable-location expiration window.
    private final int backgroundRefreshLocationTimeIntervalInMS;
    private final LocationCache locationCache;
    private final URI defaultEndpoint;
    private final ConnectionPolicy connectionPolicy;
    private final Duration maxInitializationTime;
    private final DatabaseAccountManagerInternal owner;
    // True while a refresh pass is in flight; prevents overlapping refreshes.
    private final AtomicBoolean isRefreshing;
    // True while a delayed background refresh is scheduled.
    private final AtomicBoolean refreshInBackground;
    private final Scheduler scheduler = Schedulers.newSingle(threadFactory);
    private volatile boolean isClosed;
    // NOTE(review): appears unused in the code shown here — TODO confirm before removing.
    private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true);
    private volatile DatabaseAccount latestDatabaseAccount;
    // Guard combined updates/reads of latestDatabaseAccount and locationCache.
    private final ReentrantReadWriteLock.WriteLock databaseAccountWriteLock;
    private final ReentrantReadWriteLock.ReadLock databaseAccountReadLock;
    private volatile Throwable latestDatabaseRefreshError;

    public void setLatestDatabaseRefreshError(Throwable latestDatabaseRefreshError) {
        this.latestDatabaseRefreshError = latestDatabaseRefreshError;
    }

    public Throwable getLatestDatabaseRefreshError() {
        return latestDatabaseRefreshError;
    }

    public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) {
        this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000;
        this.maxInitializationTime = Duration.ofSeconds(configs.getGlobalEndpointManagerMaxInitializationTimeInSeconds());
        try {
            this.locationCache = new LocationCache(
                connectionPolicy,
                owner.getServiceEndpoint(),
                configs);

            this.owner = owner;
            this.defaultEndpoint = owner.getServiceEndpoint();
            this.connectionPolicy = connectionPolicy;
            this.isRefreshing = new AtomicBoolean(false);
            this.refreshInBackground = new AtomicBoolean(false);
            this.isClosed = false;

            // One lock pair guards latestDatabaseAccount + locationCache as a unit.
            ReentrantReadWriteLock reentrantReadWriteLock = new ReentrantReadWriteLock();
            this.databaseAccountWriteLock = reentrantReadWriteLock.writeLock();
            this.databaseAccountReadLock = reentrantReadWriteLock.readLock();
        } catch (Exception e) {
            throw new IllegalArgumentException(e);
        }
    }

    /** Blocks until the first topology refresh completes, bounded by maxInitializationTime. */
    public void init() {
        startRefreshLocationTimerAsync(true).block(maxInitializationTime);
    }

    public UnmodifiableList<URI> getReadEndpoints() {
        return this.locationCache.getReadEndpoints();
    }

    public UnmodifiableList<URI> getWriteEndpoints() {
        return this.locationCache.getWriteEndpoints();
    }

    public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) {
        return this.locationCache.getApplicableReadEndpoints(request);
    }

    public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) {
        return this.locationCache.getApplicableWriteEndpoints(request);
    }

    public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegions) {
        return this.locationCache.getApplicableReadEndpoints(excludedRegions, Collections.emptyList());
    }

    public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegions) {
        return this.locationCache.getApplicableWriteEndpoints(excludedRegions, Collections.emptyList());
    }

    public List<URI> getAvailableReadEndpoints() {
        return this.locationCache.getAvailableReadEndpoints();
    }

    public List<URI> getAvailableWriteEndpoints() {
        return this.locationCache.getAvailableWriteEndpoints();
    }

    /**
     * Reads the database account from the default (global) endpoint, falling back to the
     * given regional locations one by one if the global endpoint is unreachable.
     * Emits the error of the last attempt when every location fails.
     */
    public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync(
        URI defaultEndpoint,
        List<String> locations,
        Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) {

        return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume(
            e -> {
                logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage());
                if (locations.isEmpty()) {
                    return Mono.error(e);
                }

                // Try each regional endpoint in order; take the first account that resolves.
                Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size())
                    .map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux());

                Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single();
                return res.doOnError(
                    innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage()));
            });
    }

    public URI resolveServiceEndpoint(RxDocumentServiceRequest request) {
        URI serviceEndpoint = this.locationCache.resolveServiceEndpoint(request);
        // Record the routed endpoint so fault-injection rules can match on it.
        if (request.faultInjectionRequestContext != null) {
            request.faultInjectionRequestContext.setLocationEndpointToRoute(serviceEndpoint);
        }
        return serviceEndpoint;
    }

    public URI resolveFaultInjectionServiceEndpoint(String region, boolean writeOnly) {
        return this.locationCache.resolveFaultInjectionEndpoint(region, writeOnly);
    }

    public URI getDefaultEndpoint() {
        return this.locationCache.getDefaultEndpoint();
    }

    public void markEndpointUnavailableForRead(URI endpoint) {
        logger.debug("Marking endpoint {} unavailable for read", endpoint);
        this.locationCache.markEndpointUnavailableForRead(endpoint);
    }

    public void markEndpointUnavailableForWrite(URI endpoint) {
        logger.debug("Marking endpoint {} unavailable for Write", endpoint);
        this.locationCache.markEndpointUnavailableForWrite(endpoint);
    }

    public boolean canUseMultipleWriteLocations() {
        return this.locationCache.canUseMultipleWriteLocations();
    }

    public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) {
        return this.locationCache.canUseMultipleWriteLocations(request);
    }

    public void close() {
        this.isClosed = true;
        this.scheduler.dispose();
        logger.debug("GlobalEndpointManager closed.");
    }

    /**
     * Refreshes the location cache. With forceRefresh the account is re-read immediately,
     * bypassing the isRefreshing guard; otherwise at most one refresh runs at a time.
     */
    public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) {
        return Mono.defer(() -> {
            logger.debug("refreshLocationAsync() invoked");

            if (forceRefresh) {
                Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
                    this.defaultEndpoint,
                    new ArrayList<>(this.getEffectivePreferredRegions()),
                    this::getDatabaseAccountAsync);

                return databaseAccountObs.map(dbAccount -> {
                    // Update topology under the write lock so readers see a consistent view.
                    this.databaseAccountWriteLock.lock();
                    try {
                        this.locationCache.onDatabaseAccountRead(dbAccount);
                    } finally {
                        this.databaseAccountWriteLock.unlock();
                    }
                    return dbAccount;
                }).flatMap(dbAccount -> {
                    return Mono.empty();
                });
            }

            if (!isRefreshing.compareAndSet(false, true)) {
                logger.debug("in the middle of another refresh. Not invoking a new refresh.");
                return Mono.empty();
            }

            logger.debug("will refresh");
            return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false));
        });
    }

    /**
     * This will provide the latest databaseAccount.
     * If due to some reason last databaseAccount update was null,
     * this method will return previous valid value
     * @return DatabaseAccount
     */
    public DatabaseAccount getLatestDatabaseAccount() {
        return this.latestDatabaseAccount;
    }

    public int getPreferredLocationCount() {
        List<String> effectivePreferredRegions = this.getEffectivePreferredRegions();
        return effectivePreferredRegions != null ? effectivePreferredRegions.size() : 0;
    }

    private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) {
        return Mono.defer(() -> {
            logger.debug("refreshLocationPrivateAsync() refreshing locations");

            if (databaseAccount != null) {
                this.databaseAccountWriteLock.lock();
                try {
                    this.locationCache.onDatabaseAccountRead(databaseAccount);
                } finally {
                    this.databaseAccountWriteLock.unlock();
                }
            }

            Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>();
            if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) {
                logger.debug("shouldRefreshEndpoints: true");

                if (databaseAccount == null && !canRefreshInBackground.v) {
                    // No account data and background refresh not allowed: fetch inline.
                    logger.debug("shouldRefreshEndpoints: can't be done in background");

                    Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
                        this.defaultEndpoint,
                        new ArrayList<>(this.getEffectivePreferredRegions()),
                        this::getDatabaseAccountAsync);

                    return databaseAccountObs.map(dbAccount -> {
                        this.databaseAccountWriteLock.lock();
                        try {
                            this.locationCache.onDatabaseAccountRead(dbAccount);
                        } finally {
                            this.databaseAccountWriteLock.unlock();
                        }
                        this.isRefreshing.set(false);
                        return dbAccount;
                    }).flatMap(dbAccount -> {
                        if (!this.refreshInBackground.get()) {
                            this.startRefreshLocationTimerAsync();
                        }
                        return Mono.empty();
                    });
                }

                if (!this.refreshInBackground.get()) {
                    this.startRefreshLocationTimerAsync();
                }
                this.isRefreshing.set(false);
                return Mono.empty();
            } else {
                logger.debug("shouldRefreshEndpoints: false, nothing to do.");
                this.isRefreshing.set(false);
                return Mono.empty();
            }
        });
    }

    private void startRefreshLocationTimerAsync() {
        startRefreshLocationTimerAsync(false).subscribe();
    }

    /**
     * Schedules a (possibly delayed) account refresh on the dedicated scheduler.
     * On failure the error is recorded and a new timer is re-armed.
     */
    private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) {
        if (this.isClosed) {
            logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed");
            return Mono.empty();
        }

        logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS);
        LocalDateTime now = LocalDateTime.now();

        // During initialization refresh immediately; afterwards respect the cadence.
        int delayInMillis = initialization ? 0 : this.backgroundRefreshLocationTimeIntervalInMS;

        this.refreshInBackground.set(true);

        return Mono.delay(Duration.ofMillis(delayInMillis), CosmosSchedulers.COSMOS_PARALLEL)
            .flatMap(t -> {
                if (this.isClosed) {
                    logger.info("client already closed");
                    return Mono.empty();
                }

                logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now);
                Mono<DatabaseAccount> databaseAccountObs =
                    GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint,
                        new ArrayList<>(this.getEffectivePreferredRegions()),
                        this::getDatabaseAccountAsync);

                return databaseAccountObs.flatMap(dbAccount -> {
                    logger.info("db account retrieved {}", dbAccount);
                    this.refreshInBackground.set(false);
                    return this.refreshLocationPrivateAsync(dbAccount);
                });
            }).onErrorResume(ex -> {
                logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. Exception: {}", ex.toString(), ex);
                this.setLatestDatabaseRefreshError(ex);
                // Re-arm the timer so a transient failure does not stop future refreshes.
                this.startRefreshLocationTimerAsync();
                return Mono.empty();
            }).subscribeOn(scheduler);
    }

    private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) {
        return this.owner.getDatabaseAccountFromEndpoint(serviceEndpoint)
            .doOnNext(databaseAccount -> {
                if (databaseAccount != null) {
                    // Record the latest account and clear the sticky refresh error together,
                    // under the write lock so readers see the pair consistently.
                    this.databaseAccountWriteLock.lock();
                    try {
                        this.latestDatabaseAccount = databaseAccount;
                        this.setLatestDatabaseRefreshError(null);
                    } finally {
                        this.databaseAccountWriteLock.unlock();
                    }
                }
                logger.debug("account retrieved: {}", databaseAccount);
            }).single();
    }

    public boolean isClosed() {
        return this.isClosed;
    }

    public String getRegionName(URI locationEndpoint, OperationType operationType) {
        return this.locationCache.getRegionName(locationEndpoint, operationType);
    }

    public ConnectionPolicy getConnectionPolicy() {
        return this.connectionPolicy;
    }
}
So `latestDatabaseAccount` is `volatile`, and `locationCache` is `final` with its contents (the location-related properties) updated under a lock, so both should be visible to other threads. You're right that this guarantees only visibility, not atomicity — I can update the code so that `latestDatabaseAccount` and `locationCache` updates happen under a write lock and their combined reads under a read lock.
// Resolves the preferred-region list used when probing locations for the database account.
// Explicitly configured preferred regions (connection policy) always win; otherwise fall
// back to the effective locations derived from the cached account topology.
// NOTE(review): latestDatabaseAccount and locationCache are read here without a lock.
// volatile/final guarantee visibility of each individually, but not that the pair is
// observed atomically — a concurrent refresh could update one between the two reads.
// TODO confirm whether a read lock should be taken around the two reads below.
private List<String> getEffectivePreferredRegions() {
    if (this.connectionPolicy.getPreferredRegions() != null && !this.connectionPolicy.getPreferredRegions().isEmpty()) {
        return this.connectionPolicy.getPreferredRegions();
    }
    // No account metadata retrieved yet — nothing to derive effective locations from.
    if (this.latestDatabaseAccount == null) {
        return Collections.emptyList();
    }
    return this.locationCache.getEffectivePreferredLocations();
}
// Returns the effective preferred-region list: regions configured on the connection
// policy take precedence; otherwise the account-derived locations are used. The read
// lock pairs with the writer-side write lock so the null-check on latestDatabaseAccount
// and the locationCache read are observed as one consistent snapshot.
private List<String> getEffectivePreferredRegions() {
    List<String> configuredPreferredRegions = this.connectionPolicy.getPreferredRegions();
    if (configuredPreferredRegions != null && !configuredPreferredRegions.isEmpty()) {
        return configuredPreferredRegions;
    }

    this.databaseAccountReadLock.lock();
    try {
        return this.latestDatabaseAccount == null
            ? Collections.emptyList()
            : this.locationCache.getEffectivePreferredLocations();
    } finally {
        this.databaseAccountReadLock.unlock();
    }
}
class GlobalEndpointManager implements AutoCloseable { private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private static final CosmosDaemonThreadFactory theadFactory = new CosmosDaemonThreadFactory("cosmos-global-endpoint-mgr"); private final int backgroundRefreshLocationTimeIntervalInMS; private final LocationCache locationCache; private final URI defaultEndpoint; private final ConnectionPolicy connectionPolicy; private final Duration maxInitializationTime; private final DatabaseAccountManagerInternal owner; private final AtomicBoolean isRefreshing; private final AtomicBoolean refreshInBackground; private final Scheduler scheduler = Schedulers.newSingle(theadFactory); private volatile boolean isClosed; private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true); private volatile DatabaseAccount latestDatabaseAccount; private volatile Throwable latestDatabaseRefreshError; public void setLatestDatabaseRefreshError(Throwable latestDatabaseRefreshError) { this.latestDatabaseRefreshError = latestDatabaseRefreshError; } public Throwable getLatestDatabaseRefreshError() { return latestDatabaseRefreshError; } public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) { this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000; this.maxInitializationTime = Duration.ofSeconds(configs.getGlobalEndpointManagerMaxInitializationTimeInSeconds()); try { this.locationCache = new LocationCache( connectionPolicy, owner.getServiceEndpoint(), configs); this.owner = owner; this.defaultEndpoint = owner.getServiceEndpoint(); this.connectionPolicy = connectionPolicy; this.isRefreshing = new AtomicBoolean(false); this.refreshInBackground = new AtomicBoolean(false); this.isClosed = false; } catch (Exception e) { throw new IllegalArgumentException(e); } } public void init() { 
startRefreshLocationTimerAsync(true).block(maxInitializationTime); } public UnmodifiableList<URI> getReadEndpoints() { return this.locationCache.getReadEndpoints(); } public UnmodifiableList<URI> getWriteEndpoints() { return this.locationCache.getWriteEndpoints(); } public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableReadEndpoints(request); } public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableWriteEndpoints(request); } public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableReadEndpoints(excludedRegions, Collections.emptyList()); } public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableWriteEndpoints(excludedRegions, Collections.emptyList()); } public List<URI> getAvailableReadEndpoints() { return this.locationCache.getAvailableReadEndpoints(); } public List<URI> getAvailableWriteEndpoints() { return this.locationCache.getAvailableWriteEndpoints(); } public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync( URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) { return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume( e -> { logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage()); if (locations.isEmpty()) { return Mono.error(e); } Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size()) .map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux()); Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single(); return res.doOnError( innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage())); }); } public URI 
resolveServiceEndpoint(RxDocumentServiceRequest request) { URI serviceEndpoint = this.locationCache.resolveServiceEndpoint(request); if (request.faultInjectionRequestContext != null) { request.faultInjectionRequestContext.setLocationEndpointToRoute(serviceEndpoint); } return serviceEndpoint; } public URI resolveFaultInjectionServiceEndpoint(String region, boolean writeOnly) { return this.locationCache.resolveFaultInjectionEndpoint(region, writeOnly); } public URI getDefaultEndpoint() { return this.locationCache.getDefaultEndpoint(); } public void markEndpointUnavailableForRead(URI endpoint) { logger.debug("Marking endpoint {} unavailable for read",endpoint); this.locationCache.markEndpointUnavailableForRead(endpoint);; } public void markEndpointUnavailableForWrite(URI endpoint) { logger.debug("Marking endpoint {} unavailable for Write",endpoint); this.locationCache.markEndpointUnavailableForWrite(endpoint); } public boolean canUseMultipleWriteLocations() { return this.locationCache.canUseMultipleWriteLocations(); } public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) { return this.locationCache.canUseMultipleWriteLocations(request); } public void close() { this.isClosed = true; this.scheduler.dispose(); logger.debug("GlobalEndpointManager closed."); } public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) { return Mono.defer(() -> { logger.debug("refreshLocationAsync() invoked"); if (forceRefresh) { Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.getEffectivePreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.locationCache.onDatabaseAccountRead(dbAccount); return dbAccount; }).flatMap(dbAccount -> { return Mono.empty(); }); } if (!isRefreshing.compareAndSet(false, true)) { logger.debug("in the middle of another refresh. 
Not invoking a new refresh."); return Mono.empty(); } logger.debug("will refresh"); return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false)); }); } /** * This will provide the latest databaseAccount. * If due to some reason last databaseAccount update was null, * this method will return previous valid value * @return DatabaseAccount */ public DatabaseAccount getLatestDatabaseAccount() { return this.latestDatabaseAccount; } public int getPreferredLocationCount() { List<String> effectivePreferredRegions = this.getEffectivePreferredRegions(); return effectivePreferredRegions != null ? effectivePreferredRegions.size() : 0; } private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) { return Mono.defer(() -> { logger.debug("refreshLocationPrivateAsync() refreshing locations"); if (databaseAccount != null) { this.locationCache.onDatabaseAccountRead(databaseAccount); } Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>(); if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) { logger.debug("shouldRefreshEndpoints: true"); if (databaseAccount == null && !canRefreshInBackground.v) { logger.debug("shouldRefreshEndpoints: can't be done in background"); Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.getEffectivePreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.locationCache.onDatabaseAccountRead(dbAccount); this.isRefreshing.set(false); return dbAccount; }).flatMap(dbAccount -> { if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } return Mono.empty(); }); } if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } this.isRefreshing.set(false); return Mono.empty(); } else { logger.debug("shouldRefreshEndpoints: false, nothing to do."); this.isRefreshing.set(false); return Mono.empty(); } }); 
} private void startRefreshLocationTimerAsync() { startRefreshLocationTimerAsync(false).subscribe(); } private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) { if (this.isClosed) { logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed"); return Mono.empty(); } logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS); LocalDateTime now = LocalDateTime.now(); int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS; this.refreshInBackground.set(true); return Mono.delay(Duration.ofMillis(delayInMillis), CosmosSchedulers.COSMOS_PARALLEL) .flatMap( t -> { if (this.isClosed) { logger.info("client already closed"); return Mono.empty(); } logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now); Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint, new ArrayList<>(this.getEffectivePreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.flatMap(dbAccount -> { logger.info("db account retrieved {}", dbAccount); this.refreshInBackground.set(false); return this.refreshLocationPrivateAsync(dbAccount); }); }).onErrorResume(ex -> { logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. 
Exception: {}", ex.toString(), ex); this.setLatestDatabaseRefreshError(ex); this.startRefreshLocationTimerAsync(); return Mono.empty(); }).subscribeOn(scheduler); } private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) { return this.owner.getDatabaseAccountFromEndpoint(serviceEndpoint) .doOnNext(databaseAccount -> { if(databaseAccount != null) { this.latestDatabaseAccount = databaseAccount; this.setLatestDatabaseRefreshError(null); } logger.debug("account retrieved: {}", databaseAccount); }).single(); } public boolean isClosed() { return this.isClosed; } public String getRegionName(URI locationEndpoint, OperationType operationType) { return this.locationCache.getRegionName(locationEndpoint, operationType); } public ConnectionPolicy getConnectionPolicy() { return this.connectionPolicy; } }
class GlobalEndpointManager implements AutoCloseable { private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private static final CosmosDaemonThreadFactory theadFactory = new CosmosDaemonThreadFactory("cosmos-global-endpoint-mgr"); private final int backgroundRefreshLocationTimeIntervalInMS; private final LocationCache locationCache; private final URI defaultEndpoint; private final ConnectionPolicy connectionPolicy; private final Duration maxInitializationTime; private final DatabaseAccountManagerInternal owner; private final AtomicBoolean isRefreshing; private final AtomicBoolean refreshInBackground; private final Scheduler scheduler = Schedulers.newSingle(theadFactory); private volatile boolean isClosed; private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true); private volatile DatabaseAccount latestDatabaseAccount; private final ReentrantReadWriteLock.WriteLock databaseAccountWriteLock; private final ReentrantReadWriteLock.ReadLock databaseAccountReadLock; private volatile Throwable latestDatabaseRefreshError; public void setLatestDatabaseRefreshError(Throwable latestDatabaseRefreshError) { this.latestDatabaseRefreshError = latestDatabaseRefreshError; } public Throwable getLatestDatabaseRefreshError() { return latestDatabaseRefreshError; } public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) { this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000; this.maxInitializationTime = Duration.ofSeconds(configs.getGlobalEndpointManagerMaxInitializationTimeInSeconds()); try { this.locationCache = new LocationCache( connectionPolicy, owner.getServiceEndpoint(), configs); this.owner = owner; this.defaultEndpoint = owner.getServiceEndpoint(); this.connectionPolicy = connectionPolicy; this.isRefreshing = new AtomicBoolean(false); this.refreshInBackground = new AtomicBoolean(false); 
this.isClosed = false; ReentrantReadWriteLock reentrantReadWriteLock = new ReentrantReadWriteLock(); this.databaseAccountWriteLock = reentrantReadWriteLock.writeLock(); this.databaseAccountReadLock = reentrantReadWriteLock.readLock(); } catch (Exception e) { throw new IllegalArgumentException(e); } } public void init() { startRefreshLocationTimerAsync(true).block(maxInitializationTime); } public UnmodifiableList<URI> getReadEndpoints() { return this.locationCache.getReadEndpoints(); } public UnmodifiableList<URI> getWriteEndpoints() { return this.locationCache.getWriteEndpoints(); } public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableReadEndpoints(request); } public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableWriteEndpoints(request); } public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableReadEndpoints(excludedRegions, Collections.emptyList()); } public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableWriteEndpoints(excludedRegions, Collections.emptyList()); } public List<URI> getAvailableReadEndpoints() { return this.locationCache.getAvailableReadEndpoints(); } public List<URI> getAvailableWriteEndpoints() { return this.locationCache.getAvailableWriteEndpoints(); } public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync( URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) { return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume( e -> { logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage()); if (locations.isEmpty()) { return Mono.error(e); } Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size()) .map(index -> 
getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux()); Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single(); return res.doOnError( innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage())); }); } public URI resolveServiceEndpoint(RxDocumentServiceRequest request) { URI serviceEndpoint = this.locationCache.resolveServiceEndpoint(request); if (request.faultInjectionRequestContext != null) { request.faultInjectionRequestContext.setLocationEndpointToRoute(serviceEndpoint); } return serviceEndpoint; } public URI resolveFaultInjectionServiceEndpoint(String region, boolean writeOnly) { return this.locationCache.resolveFaultInjectionEndpoint(region, writeOnly); } public URI getDefaultEndpoint() { return this.locationCache.getDefaultEndpoint(); } public void markEndpointUnavailableForRead(URI endpoint) { logger.debug("Marking endpoint {} unavailable for read",endpoint); this.locationCache.markEndpointUnavailableForRead(endpoint);; } public void markEndpointUnavailableForWrite(URI endpoint) { logger.debug("Marking endpoint {} unavailable for Write",endpoint); this.locationCache.markEndpointUnavailableForWrite(endpoint); } public boolean canUseMultipleWriteLocations() { return this.locationCache.canUseMultipleWriteLocations(); } public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) { return this.locationCache.canUseMultipleWriteLocations(request); } public void close() { this.isClosed = true; this.scheduler.dispose(); logger.debug("GlobalEndpointManager closed."); } public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) { return Mono.defer(() -> { logger.debug("refreshLocationAsync() invoked"); if (forceRefresh) { Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.getEffectivePreferredRegions()), 
this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.databaseAccountWriteLock.lock(); try { this.locationCache.onDatabaseAccountRead(dbAccount); } finally { this.databaseAccountWriteLock.unlock(); } return dbAccount; }).flatMap(dbAccount -> { return Mono.empty(); }); } if (!isRefreshing.compareAndSet(false, true)) { logger.debug("in the middle of another refresh. Not invoking a new refresh."); return Mono.empty(); } logger.debug("will refresh"); return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false)); }); } /** * This will provide the latest databaseAccount. * If due to some reason last databaseAccount update was null, * this method will return previous valid value * @return DatabaseAccount */ public DatabaseAccount getLatestDatabaseAccount() { return this.latestDatabaseAccount; } public int getPreferredLocationCount() { List<String> effectivePreferredRegions = this.getEffectivePreferredRegions(); return effectivePreferredRegions != null ? 
effectivePreferredRegions.size() : 0; } private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) { return Mono.defer(() -> { logger.debug("refreshLocationPrivateAsync() refreshing locations"); if (databaseAccount != null) { this.databaseAccountWriteLock.lock(); try { this.locationCache.onDatabaseAccountRead(databaseAccount); } finally { this.databaseAccountWriteLock.unlock(); } } Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>(); if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) { logger.debug("shouldRefreshEndpoints: true"); if (databaseAccount == null && !canRefreshInBackground.v) { logger.debug("shouldRefreshEndpoints: can't be done in background"); Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.getEffectivePreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.databaseAccountWriteLock.lock(); try { this.locationCache.onDatabaseAccountRead(dbAccount); } finally { this.databaseAccountWriteLock.unlock(); } this.isRefreshing.set(false); return dbAccount; }).flatMap(dbAccount -> { if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } return Mono.empty(); }); } if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } this.isRefreshing.set(false); return Mono.empty(); } else { logger.debug("shouldRefreshEndpoints: false, nothing to do."); this.isRefreshing.set(false); return Mono.empty(); } }); } private void startRefreshLocationTimerAsync() { startRefreshLocationTimerAsync(false).subscribe(); } private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) { if (this.isClosed) { logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed"); return Mono.empty(); } logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS); LocalDateTime now = 
LocalDateTime.now(); int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS; this.refreshInBackground.set(true); return Mono.delay(Duration.ofMillis(delayInMillis), CosmosSchedulers.COSMOS_PARALLEL) .flatMap( t -> { if (this.isClosed) { logger.info("client already closed"); return Mono.empty(); } logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now); Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint, new ArrayList<>(this.getEffectivePreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.flatMap(dbAccount -> { logger.info("db account retrieved {}", dbAccount); this.refreshInBackground.set(false); return this.refreshLocationPrivateAsync(dbAccount); }); }).onErrorResume(ex -> { logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. Exception: {}", ex.toString(), ex); this.setLatestDatabaseRefreshError(ex); this.startRefreshLocationTimerAsync(); return Mono.empty(); }).subscribeOn(scheduler); } private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) { return this.owner.getDatabaseAccountFromEndpoint(serviceEndpoint) .doOnNext(databaseAccount -> { if(databaseAccount != null) { this.databaseAccountWriteLock.lock(); try { this.latestDatabaseAccount = databaseAccount; this.setLatestDatabaseRefreshError(null); } finally { this.databaseAccountWriteLock.unlock(); } } logger.debug("account retrieved: {}", databaseAccount); }).single(); } public boolean isClosed() { return this.isClosed; } public String getRegionName(URI locationEndpoint, OperationType operationType) { return this.locationCache.getRegionName(locationEndpoint, operationType); } public ConnectionPolicy getConnectionPolicy() { return this.connectionPolicy; } }
It will make `latestDatabaseAccount` assignments sequential, but that should be acceptable since such assignments are infrequent and happen outside the hot path.
/**
 * Resolves the list of preferred regions to use for cross-region fallback.
 *
 * <p>Regions explicitly configured on the {@code ConnectionPolicy} always take
 * precedence. Otherwise, if a database account has already been fetched, the
 * effective preferred locations derived by the {@code LocationCache} are used.
 *
 * @return the configured preferred regions, the cache-derived effective
 *         locations, or an empty list when no account metadata is available yet.
 */
private List<String> getEffectivePreferredRegions() {
    // Explicitly configured preferred regions always win.
    List<String> configuredPreferredRegions = this.connectionPolicy.getPreferredRegions();
    if (configuredPreferredRegions != null && !configuredPreferredRegions.isEmpty()) {
        return configuredPreferredRegions;
    }
    // No database account seen yet -> nothing to derive effective regions from.
    if (this.latestDatabaseAccount == null) {
        return Collections.emptyList();
    }
    return this.locationCache.getEffectivePreferredLocations();
}
/**
 * Resolves the list of preferred regions to use for cross-region fallback.
 *
 * <p>Regions explicitly configured on the {@code ConnectionPolicy} take
 * precedence and are returned without locking. Otherwise the read lock is held
 * while {@code latestDatabaseAccount} and {@code locationCache} are consulted
 * together, so the pair is observed consistently with respect to writers that
 * update both under the paired write lock.
 *
 * @return the configured preferred regions, the cache-derived effective
 *         locations, or an empty list when no account metadata is available yet.
 */
private List<String> getEffectivePreferredRegions() {
    if (this.connectionPolicy.getPreferredRegions() != null && !this.connectionPolicy.getPreferredRegions().isEmpty()) {
        return this.connectivePolicyPreferredRegionsDoc(); // NOTE(review): placeholder? -- see below
    }
    this.databaseAccountReadLock.lock();
    try {
        if (this.latestDatabaseAccount == null) {
            // Account metadata has never been fetched; no regions can be derived.
            return Collections.emptyList();
        }
        return this.locationCache.getEffectivePreferredLocations();
    } finally {
        // Always release the read lock, including on early return.
        this.databaseAccountReadLock.unlock();
    }
}
class GlobalEndpointManager implements AutoCloseable { private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private static final CosmosDaemonThreadFactory theadFactory = new CosmosDaemonThreadFactory("cosmos-global-endpoint-mgr"); private final int backgroundRefreshLocationTimeIntervalInMS; private final LocationCache locationCache; private final URI defaultEndpoint; private final ConnectionPolicy connectionPolicy; private final Duration maxInitializationTime; private final DatabaseAccountManagerInternal owner; private final AtomicBoolean isRefreshing; private final AtomicBoolean refreshInBackground; private final Scheduler scheduler = Schedulers.newSingle(theadFactory); private volatile boolean isClosed; private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true); private volatile DatabaseAccount latestDatabaseAccount; private volatile Throwable latestDatabaseRefreshError; public void setLatestDatabaseRefreshError(Throwable latestDatabaseRefreshError) { this.latestDatabaseRefreshError = latestDatabaseRefreshError; } public Throwable getLatestDatabaseRefreshError() { return latestDatabaseRefreshError; } public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) { this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000; this.maxInitializationTime = Duration.ofSeconds(configs.getGlobalEndpointManagerMaxInitializationTimeInSeconds()); try { this.locationCache = new LocationCache( connectionPolicy, owner.getServiceEndpoint(), configs); this.owner = owner; this.defaultEndpoint = owner.getServiceEndpoint(); this.connectionPolicy = connectionPolicy; this.isRefreshing = new AtomicBoolean(false); this.refreshInBackground = new AtomicBoolean(false); this.isClosed = false; } catch (Exception e) { throw new IllegalArgumentException(e); } } public void init() { 
startRefreshLocationTimerAsync(true).block(maxInitializationTime); } public UnmodifiableList<URI> getReadEndpoints() { return this.locationCache.getReadEndpoints(); } public UnmodifiableList<URI> getWriteEndpoints() { return this.locationCache.getWriteEndpoints(); } public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableReadEndpoints(request); } public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableWriteEndpoints(request); } public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableReadEndpoints(excludedRegions, Collections.emptyList()); } public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableWriteEndpoints(excludedRegions, Collections.emptyList()); } public List<URI> getAvailableReadEndpoints() { return this.locationCache.getAvailableReadEndpoints(); } public List<URI> getAvailableWriteEndpoints() { return this.locationCache.getAvailableWriteEndpoints(); } public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync( URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) { return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume( e -> { logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage()); if (locations.isEmpty()) { return Mono.error(e); } Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size()) .map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux()); Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single(); return res.doOnError( innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage())); }); } public URI 
resolveServiceEndpoint(RxDocumentServiceRequest request) { URI serviceEndpoint = this.locationCache.resolveServiceEndpoint(request); if (request.faultInjectionRequestContext != null) { request.faultInjectionRequestContext.setLocationEndpointToRoute(serviceEndpoint); } return serviceEndpoint; } public URI resolveFaultInjectionServiceEndpoint(String region, boolean writeOnly) { return this.locationCache.resolveFaultInjectionEndpoint(region, writeOnly); } public URI getDefaultEndpoint() { return this.locationCache.getDefaultEndpoint(); } public void markEndpointUnavailableForRead(URI endpoint) { logger.debug("Marking endpoint {} unavailable for read",endpoint); this.locationCache.markEndpointUnavailableForRead(endpoint);; } public void markEndpointUnavailableForWrite(URI endpoint) { logger.debug("Marking endpoint {} unavailable for Write",endpoint); this.locationCache.markEndpointUnavailableForWrite(endpoint); } public boolean canUseMultipleWriteLocations() { return this.locationCache.canUseMultipleWriteLocations(); } public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) { return this.locationCache.canUseMultipleWriteLocations(request); } public void close() { this.isClosed = true; this.scheduler.dispose(); logger.debug("GlobalEndpointManager closed."); } public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) { return Mono.defer(() -> { logger.debug("refreshLocationAsync() invoked"); if (forceRefresh) { Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.getEffectivePreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.locationCache.onDatabaseAccountRead(dbAccount); return dbAccount; }).flatMap(dbAccount -> { return Mono.empty(); }); } if (!isRefreshing.compareAndSet(false, true)) { logger.debug("in the middle of another refresh. 
Not invoking a new refresh."); return Mono.empty(); } logger.debug("will refresh"); return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false)); }); } /** * This will provide the latest databaseAccount. * If due to some reason last databaseAccount update was null, * this method will return previous valid value * @return DatabaseAccount */ public DatabaseAccount getLatestDatabaseAccount() { return this.latestDatabaseAccount; } public int getPreferredLocationCount() { List<String> effectivePreferredRegions = this.getEffectivePreferredRegions(); return effectivePreferredRegions != null ? effectivePreferredRegions.size() : 0; } private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) { return Mono.defer(() -> { logger.debug("refreshLocationPrivateAsync() refreshing locations"); if (databaseAccount != null) { this.locationCache.onDatabaseAccountRead(databaseAccount); } Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>(); if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) { logger.debug("shouldRefreshEndpoints: true"); if (databaseAccount == null && !canRefreshInBackground.v) { logger.debug("shouldRefreshEndpoints: can't be done in background"); Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.getEffectivePreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.locationCache.onDatabaseAccountRead(dbAccount); this.isRefreshing.set(false); return dbAccount; }).flatMap(dbAccount -> { if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } return Mono.empty(); }); } if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } this.isRefreshing.set(false); return Mono.empty(); } else { logger.debug("shouldRefreshEndpoints: false, nothing to do."); this.isRefreshing.set(false); return Mono.empty(); } }); 
} private void startRefreshLocationTimerAsync() { startRefreshLocationTimerAsync(false).subscribe(); } private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) { if (this.isClosed) { logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed"); return Mono.empty(); } logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS); LocalDateTime now = LocalDateTime.now(); int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS; this.refreshInBackground.set(true); return Mono.delay(Duration.ofMillis(delayInMillis), CosmosSchedulers.COSMOS_PARALLEL) .flatMap( t -> { if (this.isClosed) { logger.info("client already closed"); return Mono.empty(); } logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now); Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint, new ArrayList<>(this.getEffectivePreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.flatMap(dbAccount -> { logger.info("db account retrieved {}", dbAccount); this.refreshInBackground.set(false); return this.refreshLocationPrivateAsync(dbAccount); }); }).onErrorResume(ex -> { logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. 
Exception: {}", ex.toString(), ex); this.setLatestDatabaseRefreshError(ex); this.startRefreshLocationTimerAsync(); return Mono.empty(); }).subscribeOn(scheduler); } private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) { return this.owner.getDatabaseAccountFromEndpoint(serviceEndpoint) .doOnNext(databaseAccount -> { if(databaseAccount != null) { this.latestDatabaseAccount = databaseAccount; this.setLatestDatabaseRefreshError(null); } logger.debug("account retrieved: {}", databaseAccount); }).single(); } public boolean isClosed() { return this.isClosed; } public String getRegionName(URI locationEndpoint, OperationType operationType) { return this.locationCache.getRegionName(locationEndpoint, operationType); } public ConnectionPolicy getConnectionPolicy() { return this.connectionPolicy; } }
class GlobalEndpointManager implements AutoCloseable { private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private static final CosmosDaemonThreadFactory theadFactory = new CosmosDaemonThreadFactory("cosmos-global-endpoint-mgr"); private final int backgroundRefreshLocationTimeIntervalInMS; private final LocationCache locationCache; private final URI defaultEndpoint; private final ConnectionPolicy connectionPolicy; private final Duration maxInitializationTime; private final DatabaseAccountManagerInternal owner; private final AtomicBoolean isRefreshing; private final AtomicBoolean refreshInBackground; private final Scheduler scheduler = Schedulers.newSingle(theadFactory); private volatile boolean isClosed; private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true); private volatile DatabaseAccount latestDatabaseAccount; private final ReentrantReadWriteLock.WriteLock databaseAccountWriteLock; private final ReentrantReadWriteLock.ReadLock databaseAccountReadLock; private volatile Throwable latestDatabaseRefreshError; public void setLatestDatabaseRefreshError(Throwable latestDatabaseRefreshError) { this.latestDatabaseRefreshError = latestDatabaseRefreshError; } public Throwable getLatestDatabaseRefreshError() { return latestDatabaseRefreshError; } public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) { this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000; this.maxInitializationTime = Duration.ofSeconds(configs.getGlobalEndpointManagerMaxInitializationTimeInSeconds()); try { this.locationCache = new LocationCache( connectionPolicy, owner.getServiceEndpoint(), configs); this.owner = owner; this.defaultEndpoint = owner.getServiceEndpoint(); this.connectionPolicy = connectionPolicy; this.isRefreshing = new AtomicBoolean(false); this.refreshInBackground = new AtomicBoolean(false); 
this.isClosed = false; ReentrantReadWriteLock reentrantReadWriteLock = new ReentrantReadWriteLock(); this.databaseAccountWriteLock = reentrantReadWriteLock.writeLock(); this.databaseAccountReadLock = reentrantReadWriteLock.readLock(); } catch (Exception e) { throw new IllegalArgumentException(e); } } public void init() { startRefreshLocationTimerAsync(true).block(maxInitializationTime); } public UnmodifiableList<URI> getReadEndpoints() { return this.locationCache.getReadEndpoints(); } public UnmodifiableList<URI> getWriteEndpoints() { return this.locationCache.getWriteEndpoints(); } public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableReadEndpoints(request); } public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableWriteEndpoints(request); } public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableReadEndpoints(excludedRegions, Collections.emptyList()); } public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableWriteEndpoints(excludedRegions, Collections.emptyList()); } public List<URI> getAvailableReadEndpoints() { return this.locationCache.getAvailableReadEndpoints(); } public List<URI> getAvailableWriteEndpoints() { return this.locationCache.getAvailableWriteEndpoints(); } public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync( URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) { return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume( e -> { logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage()); if (locations.isEmpty()) { return Mono.error(e); } Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size()) .map(index -> 
getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux()); Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single(); return res.doOnError( innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage())); }); } public URI resolveServiceEndpoint(RxDocumentServiceRequest request) { URI serviceEndpoint = this.locationCache.resolveServiceEndpoint(request); if (request.faultInjectionRequestContext != null) { request.faultInjectionRequestContext.setLocationEndpointToRoute(serviceEndpoint); } return serviceEndpoint; } public URI resolveFaultInjectionServiceEndpoint(String region, boolean writeOnly) { return this.locationCache.resolveFaultInjectionEndpoint(region, writeOnly); } public URI getDefaultEndpoint() { return this.locationCache.getDefaultEndpoint(); } public void markEndpointUnavailableForRead(URI endpoint) { logger.debug("Marking endpoint {} unavailable for read",endpoint); this.locationCache.markEndpointUnavailableForRead(endpoint);; } public void markEndpointUnavailableForWrite(URI endpoint) { logger.debug("Marking endpoint {} unavailable for Write",endpoint); this.locationCache.markEndpointUnavailableForWrite(endpoint); } public boolean canUseMultipleWriteLocations() { return this.locationCache.canUseMultipleWriteLocations(); } public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) { return this.locationCache.canUseMultipleWriteLocations(request); } public void close() { this.isClosed = true; this.scheduler.dispose(); logger.debug("GlobalEndpointManager closed."); } public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) { return Mono.defer(() -> { logger.debug("refreshLocationAsync() invoked"); if (forceRefresh) { Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.getEffectivePreferredRegions()), 
this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.databaseAccountWriteLock.lock(); try { this.locationCache.onDatabaseAccountRead(dbAccount); } finally { this.databaseAccountWriteLock.unlock(); } return dbAccount; }).flatMap(dbAccount -> { return Mono.empty(); }); } if (!isRefreshing.compareAndSet(false, true)) { logger.debug("in the middle of another refresh. Not invoking a new refresh."); return Mono.empty(); } logger.debug("will refresh"); return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false)); }); } /** * This will provide the latest databaseAccount. * If due to some reason last databaseAccount update was null, * this method will return previous valid value * @return DatabaseAccount */ public DatabaseAccount getLatestDatabaseAccount() { return this.latestDatabaseAccount; } public int getPreferredLocationCount() { List<String> effectivePreferredRegions = this.getEffectivePreferredRegions(); return effectivePreferredRegions != null ? 
effectivePreferredRegions.size() : 0; } private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) { return Mono.defer(() -> { logger.debug("refreshLocationPrivateAsync() refreshing locations"); if (databaseAccount != null) { this.databaseAccountWriteLock.lock(); try { this.locationCache.onDatabaseAccountRead(databaseAccount); } finally { this.databaseAccountWriteLock.unlock(); } } Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>(); if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) { logger.debug("shouldRefreshEndpoints: true"); if (databaseAccount == null && !canRefreshInBackground.v) { logger.debug("shouldRefreshEndpoints: can't be done in background"); Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.getEffectivePreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.databaseAccountWriteLock.lock(); try { this.locationCache.onDatabaseAccountRead(dbAccount); } finally { this.databaseAccountWriteLock.unlock(); } this.isRefreshing.set(false); return dbAccount; }).flatMap(dbAccount -> { if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } return Mono.empty(); }); } if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } this.isRefreshing.set(false); return Mono.empty(); } else { logger.debug("shouldRefreshEndpoints: false, nothing to do."); this.isRefreshing.set(false); return Mono.empty(); } }); } private void startRefreshLocationTimerAsync() { startRefreshLocationTimerAsync(false).subscribe(); } private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) { if (this.isClosed) { logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed"); return Mono.empty(); } logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS); LocalDateTime now = 
LocalDateTime.now(); int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS; this.refreshInBackground.set(true); return Mono.delay(Duration.ofMillis(delayInMillis), CosmosSchedulers.COSMOS_PARALLEL) .flatMap( t -> { if (this.isClosed) { logger.info("client already closed"); return Mono.empty(); } logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now); Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint, new ArrayList<>(this.getEffectivePreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.flatMap(dbAccount -> { logger.info("db account retrieved {}", dbAccount); this.refreshInBackground.set(false); return this.refreshLocationPrivateAsync(dbAccount); }); }).onErrorResume(ex -> { logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. Exception: {}", ex.toString(), ex); this.setLatestDatabaseRefreshError(ex); this.startRefreshLocationTimerAsync(); return Mono.empty(); }).subscribeOn(scheduler); } private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) { return this.owner.getDatabaseAccountFromEndpoint(serviceEndpoint) .doOnNext(databaseAccount -> { if(databaseAccount != null) { this.databaseAccountWriteLock.lock(); try { this.latestDatabaseAccount = databaseAccount; this.setLatestDatabaseRefreshError(null); } finally { this.databaseAccountWriteLock.unlock(); } } logger.debug("account retrieved: {}", databaseAccount); }).single(); } public boolean isClosed() { return this.isClosed; } public String getRegionName(URI locationEndpoint, OperationType operationType) { return this.locationCache.getRegionName(locationEndpoint, operationType); } public ConnectionPolicy getConnectionPolicy() { return this.connectionPolicy; } }
So what I did is: plain reads of `latestDatabaseAccount` are left as-is, any reassignment of it is wrapped in the write lock, and whenever I have to access both `latestDatabaseAccount` and `locationCache` together I do so under the read lock.
// NOTE(review): pre-fix snapshot — latestDatabaseAccount and locationCache are read
// here without the read lock, so the null-check and the cache read are not atomic
// with respect to a concurrent refresh.
private List<String> getEffectivePreferredRegions() {
    // Explicitly configured preferred regions always take precedence.
    if (this.connectionPolicy.getPreferredRegions() != null && !this.connectionPolicy.getPreferredRegions().isEmpty()) {
        return this.connectionPolicy.getPreferredRegions();
    }
    // No database account metadata retrieved yet — nothing to derive regions from.
    if (this.latestDatabaseAccount == null) {
        return Collections.emptyList();
    }
    // Delegate to the location cache for the effective preferred locations
    // (cache content is populated from database account reads elsewhere).
    return this.locationCache.getEffectivePreferredLocations();
}
// Post-fix version: the account/cache reads are taken under the shared read lock.
private List<String> getEffectivePreferredRegions() {
    // Explicitly configured preferred regions take precedence; connectionPolicy is a
    // final field, so no locking is needed for this branch.
    if (this.connectionPolicy.getPreferredRegions() != null && !this.connectionPolicy.getPreferredRegions().isEmpty()) {
        return this.connectionPolicy.getPreferredRegions();
    }
    // Take the read lock so the null-check on latestDatabaseAccount and the
    // locationCache read observe a consistent snapshot relative to writers that
    // update both under the paired write lock.
    this.databaseAccountReadLock.lock();
    try {
        if (this.latestDatabaseAccount == null) {
            return Collections.emptyList();
        }
        return this.locationCache.getEffectivePreferredLocations();
    } finally {
        this.databaseAccountReadLock.unlock();
    }
}
// NOTE(review): pre-fix snapshot of GlobalEndpointManager — latestDatabaseAccount and
// locationCache are updated and read without a shared read-write lock here; the fixed
// version introduces a ReentrantReadWriteLock around those accesses.
/**
 * Tracks the account's regional service endpoints and periodically refreshes them
 * from database account metadata, both on demand and on a background timer.
 */
class GlobalEndpointManager implements AutoCloseable {
    private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class);
    // NOTE(review): "theadFactory" looks like a typo for "threadFactory".
    private static final CosmosDaemonThreadFactory theadFactory = new CosmosDaemonThreadFactory("cosmos-global-endpoint-mgr");
    private final int backgroundRefreshLocationTimeIntervalInMS;
    private final LocationCache locationCache;
    private final URI defaultEndpoint;
    private final ConnectionPolicy connectionPolicy;
    private final Duration maxInitializationTime;
    private final DatabaseAccountManagerInternal owner;
    // Guards against overlapping non-forced refreshes.
    private final AtomicBoolean isRefreshing;
    // True while a timer-driven background refresh is scheduled/pending.
    private final AtomicBoolean refreshInBackground;
    private final Scheduler scheduler = Schedulers.newSingle(theadFactory);
    private volatile boolean isClosed;
    // NOTE(review): field appears unused in this snippet — confirm against the full file.
    private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true);
    // Last successfully retrieved account; only overwritten on non-null reads, so a
    // failed refresh keeps the previous valid value.
    private volatile DatabaseAccount latestDatabaseAccount;
    // Last refresh failure; cleared on a successful account read.
    private volatile Throwable latestDatabaseRefreshError;

    public void setLatestDatabaseRefreshError(Throwable latestDatabaseRefreshError) {
        this.latestDatabaseRefreshError = latestDatabaseRefreshError;
    }

    public Throwable getLatestDatabaseRefreshError() {
        return latestDatabaseRefreshError;
    }

    public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) {
        // The unavailable-location expiration time doubles as the background refresh interval.
        this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000;
        this.maxInitializationTime = Duration.ofSeconds(configs.getGlobalEndpointManagerMaxInitializationTimeInSeconds());
        try {
            this.locationCache = new LocationCache(
                connectionPolicy,
                owner.getServiceEndpoint(),
                configs);
            this.owner = owner;
            this.defaultEndpoint = owner.getServiceEndpoint();
            this.connectionPolicy = connectionPolicy;
            this.isRefreshing = new AtomicBoolean(false);
            this.refreshInBackground = new AtomicBoolean(false);
            this.isClosed = false;
        } catch (Exception e) {
            throw new IllegalArgumentException(e);
        }
    }

    // Blocks until the first account refresh completes, bounded by maxInitializationTime.
    public void init() {
        startRefreshLocationTimerAsync(true).block(maxInitializationTime);
    }

    public UnmodifiableList<URI> getReadEndpoints() {
        return this.locationCache.getReadEndpoints();
    }

    public UnmodifiableList<URI> getWriteEndpoints() {
        return this.locationCache.getWriteEndpoints();
    }

    public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) {
        return this.locationCache.getApplicableReadEndpoints(request);
    }

    public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) {
        return this.locationCache.getApplicableWriteEndpoints(request);
    }

    public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegions) {
        return this.locationCache.getApplicableReadEndpoints(excludedRegions, Collections.emptyList());
    }

    public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegions) {
        return this.locationCache.getApplicableWriteEndpoints(excludedRegions, Collections.emptyList());
    }

    public List<URI> getAvailableReadEndpoints() {
        return this.locationCache.getAvailableReadEndpoints();
    }

    public List<URI> getAvailableWriteEndpoints() {
        return this.locationCache.getAvailableWriteEndpoints();
    }

    // Tries the default (global) endpoint first; on failure, falls back to each regional
    // endpoint in order and returns the first successful database account read.
    public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync(
        URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) {
        return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume(
            e -> {
                logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage());
                if (locations.isEmpty()) {
                    return Mono.error(e);
                }

                Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size())
                    .map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux());

                // concatDelayError keeps trying later locations even if earlier ones fail.
                Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single();
                return res.doOnError(
                    innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage()));
            });
    }

    public URI resolveServiceEndpoint(RxDocumentServiceRequest request) {
        URI serviceEndpoint = this.locationCache.resolveServiceEndpoint(request);
        // Record the routed endpoint for fault-injection scenarios, when present.
        if (request.faultInjectionRequestContext != null) {
            request.faultInjectionRequestContext.setLocationEndpointToRoute(serviceEndpoint);
        }
        return serviceEndpoint;
    }

    public URI resolveFaultInjectionServiceEndpoint(String region, boolean writeOnly) {
        return this.locationCache.resolveFaultInjectionEndpoint(region, writeOnly);
    }

    public URI getDefaultEndpoint() {
        return this.locationCache.getDefaultEndpoint();
    }

    public void markEndpointUnavailableForRead(URI endpoint) {
        logger.debug("Marking endpoint {} unavailable for read",endpoint);
        this.locationCache.markEndpointUnavailableForRead(endpoint);; // NOTE(review): stray extra ';'
    }

    public void markEndpointUnavailableForWrite(URI endpoint) {
        logger.debug("Marking endpoint {} unavailable for Write",endpoint);
        this.locationCache.markEndpointUnavailableForWrite(endpoint);
    }

    public boolean canUseMultipleWriteLocations() {
        return this.locationCache.canUseMultipleWriteLocations();
    }

    public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) {
        return this.locationCache.canUseMultipleWriteLocations(request);
    }

    public void close() {
        this.isClosed = true;
        this.scheduler.dispose();
        logger.debug("GlobalEndpointManager closed.");
    }

    public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) {
        return Mono.defer(() -> {
            logger.debug("refreshLocationAsync() invoked");

            if (forceRefresh) {
                Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
                    this.defaultEndpoint,
                    new ArrayList<>(this.getEffectivePreferredRegions()),
                    this::getDatabaseAccountAsync);

                return databaseAccountObs.map(dbAccount -> {
                    // NOTE(review): pre-fix — locationCache updated without the write lock.
                    this.locationCache.onDatabaseAccountRead(dbAccount);
                    return dbAccount;
                }).flatMap(dbAccount -> {
                    return Mono.empty();
                });
            }

            // Only one non-forced refresh at a time; a concurrent caller is a no-op.
            if (!isRefreshing.compareAndSet(false, true)) {
                logger.debug("in the middle of another refresh. Not invoking a new refresh.");
                return Mono.empty();
            }

            logger.debug("will refresh");
            // On failure, release the refresh flag so future refreshes are not blocked.
            return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false));
        });
    }

    /**
     * This will provide the latest databaseAccount.
     * If due to some reason last databaseAccount update was null,
     * this method will return previous valid value
     * @return DatabaseAccount
     */
    public DatabaseAccount getLatestDatabaseAccount() {
        return this.latestDatabaseAccount;
    }

    public int getPreferredLocationCount() {
        List<String> effectivePreferredRegions = this.getEffectivePreferredRegions();
        return effectivePreferredRegions != null ? effectivePreferredRegions.size() : 0;
    }

    private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) {
        return Mono.defer(() -> {
            logger.debug("refreshLocationPrivateAsync() refreshing locations");

            if (databaseAccount != null) {
                this.locationCache.onDatabaseAccountRead(databaseAccount);
            }

            Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>();
            if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) {
                logger.debug("shouldRefreshEndpoints: true");

                if (databaseAccount == null && !canRefreshInBackground.v) {
                    logger.debug("shouldRefreshEndpoints: can't be done in background");

                    Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
                        this.defaultEndpoint,
                        new ArrayList<>(this.getEffectivePreferredRegions()),
                        this::getDatabaseAccountAsync);

                    return databaseAccountObs.map(dbAccount -> {
                        this.locationCache.onDatabaseAccountRead(dbAccount);
                        this.isRefreshing.set(false);
                        return dbAccount;
                    }).flatMap(dbAccount -> {
                        // Arm the background timer only if one is not already pending.
                        if (!this.refreshInBackground.get()) {
                            this.startRefreshLocationTimerAsync();
                        }
                        return Mono.empty();
                    });
                }

                if (!this.refreshInBackground.get()) {
                    this.startRefreshLocationTimerAsync();
                }
                this.isRefreshing.set(false);
                return Mono.empty();
            } else {
                logger.debug("shouldRefreshEndpoints: false, nothing to do.");
                this.isRefreshing.set(false);
                return Mono.empty();
            }
        });
    }

    private void startRefreshLocationTimerAsync() {
        startRefreshLocationTimerAsync(false).subscribe();
    }

    private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) {
        if (this.isClosed) {
            logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed");
            return Mono.empty();
        }

        logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS);
        LocalDateTime now = LocalDateTime.now();

        // Initialization runs immediately; later refreshes wait the configured interval.
        int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS;

        this.refreshInBackground.set(true);

        return Mono.delay(Duration.ofMillis(delayInMillis), CosmosSchedulers.COSMOS_PARALLEL)
            .flatMap(
                t -> {
                    if (this.isClosed) {
                        logger.info("client already closed");
                        return Mono.empty();
                    }

                    logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now);
                    Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint,
                        new ArrayList<>(this.getEffectivePreferredRegions()), this::getDatabaseAccountAsync);

                    return databaseAccountObs.flatMap(dbAccount -> {
                        logger.info("db account retrieved {}", dbAccount);
                        this.refreshInBackground.set(false);
                        return this.refreshLocationPrivateAsync(dbAccount);
                    });
                }).onErrorResume(ex -> {
                    logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. Exception: {}", ex.toString(), ex);
                    this.setLatestDatabaseRefreshError(ex);

                    // Re-arm the timer so a transient failure does not stop background refreshes.
                    this.startRefreshLocationTimerAsync();
                    return Mono.empty();
                }).subscribeOn(scheduler);
    }

    private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) {
        return this.owner.getDatabaseAccountFromEndpoint(serviceEndpoint)
            .doOnNext(databaseAccount -> {
                if(databaseAccount != null) {
                    // NOTE(review): pre-fix — this write is not coordinated with readers
                    // that consult latestDatabaseAccount together with locationCache.
                    this.latestDatabaseAccount = databaseAccount;
                    this.setLatestDatabaseRefreshError(null);
                }

                logger.debug("account retrieved: {}", databaseAccount);
            }).single();
    }

    public boolean isClosed() {
        return this.isClosed;
    }

    public String getRegionName(URI locationEndpoint, OperationType operationType) {
        return this.locationCache.getRegionName(locationEndpoint, operationType);
    }

    public ConnectionPolicy getConnectionPolicy() {
        return this.connectionPolicy;
    }
}
/**
 * Post-fix snapshot of GlobalEndpointManager: a single ReentrantReadWriteLock now
 * coordinates updates to latestDatabaseAccount/locationCache (write lock) with readers
 * that need a consistent view of both (read lock). Plain reads of the volatile
 * latestDatabaseAccount field remain lock-free.
 */
class GlobalEndpointManager implements AutoCloseable {
    private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class);
    // NOTE(review): "theadFactory" looks like a typo for "threadFactory".
    private static final CosmosDaemonThreadFactory theadFactory = new CosmosDaemonThreadFactory("cosmos-global-endpoint-mgr");
    private final int backgroundRefreshLocationTimeIntervalInMS;
    private final LocationCache locationCache;
    private final URI defaultEndpoint;
    private final ConnectionPolicy connectionPolicy;
    private final Duration maxInitializationTime;
    private final DatabaseAccountManagerInternal owner;
    // Guards against overlapping non-forced refreshes.
    private final AtomicBoolean isRefreshing;
    // True while a timer-driven background refresh is scheduled/pending.
    private final AtomicBoolean refreshInBackground;
    private final Scheduler scheduler = Schedulers.newSingle(theadFactory);
    private volatile boolean isClosed;
    // NOTE(review): field appears unused in this snippet — confirm against the full file.
    private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true);
    // Last successfully retrieved account; only overwritten on non-null reads (under the
    // write lock), so a failed refresh keeps the previous valid value.
    private volatile DatabaseAccount latestDatabaseAccount;
    // Paired read/write halves of one ReentrantReadWriteLock created in the constructor.
    private final ReentrantReadWriteLock.WriteLock databaseAccountWriteLock;
    private final ReentrantReadWriteLock.ReadLock databaseAccountReadLock;
    // Last refresh failure; cleared on a successful account read.
    private volatile Throwable latestDatabaseRefreshError;

    public void setLatestDatabaseRefreshError(Throwable latestDatabaseRefreshError) {
        this.latestDatabaseRefreshError = latestDatabaseRefreshError;
    }

    public Throwable getLatestDatabaseRefreshError() {
        return latestDatabaseRefreshError;
    }

    public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) {
        // The unavailable-location expiration time doubles as the background refresh interval.
        this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000;
        this.maxInitializationTime = Duration.ofSeconds(configs.getGlobalEndpointManagerMaxInitializationTimeInSeconds());
        try {
            this.locationCache = new LocationCache(
                connectionPolicy,
                owner.getServiceEndpoint(),
                configs);
            this.owner = owner;
            this.defaultEndpoint = owner.getServiceEndpoint();
            this.connectionPolicy = connectionPolicy;
            this.isRefreshing = new AtomicBoolean(false);
            this.refreshInBackground = new AtomicBoolean(false);
            this.isClosed = false;
            // Both lock views come from the same ReentrantReadWriteLock instance.
            ReentrantReadWriteLock reentrantReadWriteLock = new ReentrantReadWriteLock();
            this.databaseAccountWriteLock = reentrantReadWriteLock.writeLock();
            this.databaseAccountReadLock = reentrantReadWriteLock.readLock();
        } catch (Exception e) {
            throw new IllegalArgumentException(e);
        }
    }

    // Blocks until the first account refresh completes, bounded by maxInitializationTime.
    public void init() {
        startRefreshLocationTimerAsync(true).block(maxInitializationTime);
    }

    public UnmodifiableList<URI> getReadEndpoints() {
        return this.locationCache.getReadEndpoints();
    }

    public UnmodifiableList<URI> getWriteEndpoints() {
        return this.locationCache.getWriteEndpoints();
    }

    public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) {
        return this.locationCache.getApplicableReadEndpoints(request);
    }

    public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) {
        return this.locationCache.getApplicableWriteEndpoints(request);
    }

    public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegions) {
        return this.locationCache.getApplicableReadEndpoints(excludedRegions, Collections.emptyList());
    }

    public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegions) {
        return this.locationCache.getApplicableWriteEndpoints(excludedRegions, Collections.emptyList());
    }

    public List<URI> getAvailableReadEndpoints() {
        return this.locationCache.getAvailableReadEndpoints();
    }

    public List<URI> getAvailableWriteEndpoints() {
        return this.locationCache.getAvailableWriteEndpoints();
    }

    // Tries the default (global) endpoint first; on failure, falls back to each regional
    // endpoint in order and returns the first successful database account read.
    public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync(
        URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) {
        return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume(
            e -> {
                logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage());
                if (locations.isEmpty()) {
                    return Mono.error(e);
                }

                Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size())
                    .map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux());

                // concatDelayError keeps trying later locations even if earlier ones fail.
                Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single();
                return res.doOnError(
                    innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage()));
            });
    }

    public URI resolveServiceEndpoint(RxDocumentServiceRequest request) {
        URI serviceEndpoint = this.locationCache.resolveServiceEndpoint(request);
        // Record the routed endpoint for fault-injection scenarios, when present.
        if (request.faultInjectionRequestContext != null) {
            request.faultInjectionRequestContext.setLocationEndpointToRoute(serviceEndpoint);
        }
        return serviceEndpoint;
    }

    public URI resolveFaultInjectionServiceEndpoint(String region, boolean writeOnly) {
        return this.locationCache.resolveFaultInjectionEndpoint(region, writeOnly);
    }

    public URI getDefaultEndpoint() {
        return this.locationCache.getDefaultEndpoint();
    }

    public void markEndpointUnavailableForRead(URI endpoint) {
        logger.debug("Marking endpoint {} unavailable for read",endpoint);
        this.locationCache.markEndpointUnavailableForRead(endpoint);; // NOTE(review): stray extra ';'
    }

    public void markEndpointUnavailableForWrite(URI endpoint) {
        logger.debug("Marking endpoint {} unavailable for Write",endpoint);
        this.locationCache.markEndpointUnavailableForWrite(endpoint);
    }

    public boolean canUseMultipleWriteLocations() {
        return this.locationCache.canUseMultipleWriteLocations();
    }

    public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) {
        return this.locationCache.canUseMultipleWriteLocations(request);
    }

    public void close() {
        this.isClosed = true;
        this.scheduler.dispose();
        logger.debug("GlobalEndpointManager closed.");
    }

    public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) {
        return Mono.defer(() -> {
            logger.debug("refreshLocationAsync() invoked");

            if (forceRefresh) {
                Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
                    this.defaultEndpoint,
                    new ArrayList<>(this.getEffectivePreferredRegions()),
                    this::getDatabaseAccountAsync);

                return databaseAccountObs.map(dbAccount -> {
                    // Cache update happens under the write lock so readers holding the
                    // read lock see a consistent account/cache pair.
                    this.databaseAccountWriteLock.lock();
                    try {
                        this.locationCache.onDatabaseAccountRead(dbAccount);
                    } finally {
                        this.databaseAccountWriteLock.unlock();
                    }
                    return dbAccount;
                }).flatMap(dbAccount -> {
                    return Mono.empty();
                });
            }

            // Only one non-forced refresh at a time; a concurrent caller is a no-op.
            if (!isRefreshing.compareAndSet(false, true)) {
                logger.debug("in the middle of another refresh. Not invoking a new refresh.");
                return Mono.empty();
            }

            logger.debug("will refresh");
            // On failure, release the refresh flag so future refreshes are not blocked.
            return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false));
        });
    }

    /**
     * This will provide the latest databaseAccount.
     * If due to some reason last databaseAccount update was null,
     * this method will return previous valid value
     * @return DatabaseAccount
     */
    public DatabaseAccount getLatestDatabaseAccount() {
        return this.latestDatabaseAccount;
    }

    public int getPreferredLocationCount() {
        List<String> effectivePreferredRegions = this.getEffectivePreferredRegions();
        return effectivePreferredRegions != null ? effectivePreferredRegions.size() : 0;
    }

    private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) {
        return Mono.defer(() -> {
            logger.debug("refreshLocationPrivateAsync() refreshing locations");

            if (databaseAccount != null) {
                // Cache update under the write lock (see class-level locking note).
                this.databaseAccountWriteLock.lock();
                try {
                    this.locationCache.onDatabaseAccountRead(databaseAccount);
                } finally {
                    this.databaseAccountWriteLock.unlock();
                }
            }

            Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>();
            if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) {
                logger.debug("shouldRefreshEndpoints: true");

                if (databaseAccount == null && !canRefreshInBackground.v) {
                    logger.debug("shouldRefreshEndpoints: can't be done in background");

                    Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
                        this.defaultEndpoint,
                        new ArrayList<>(this.getEffectivePreferredRegions()),
                        this::getDatabaseAccountAsync);

                    return databaseAccountObs.map(dbAccount -> {
                        this.databaseAccountWriteLock.lock();
                        try {
                            this.locationCache.onDatabaseAccountRead(dbAccount);
                        } finally {
                            this.databaseAccountWriteLock.unlock();
                        }
                        this.isRefreshing.set(false);
                        return dbAccount;
                    }).flatMap(dbAccount -> {
                        // Arm the background timer only if one is not already pending.
                        if (!this.refreshInBackground.get()) {
                            this.startRefreshLocationTimerAsync();
                        }
                        return Mono.empty();
                    });
                }

                if (!this.refreshInBackground.get()) {
                    this.startRefreshLocationTimerAsync();
                }
                this.isRefreshing.set(false);
                return Mono.empty();
            } else {
                logger.debug("shouldRefreshEndpoints: false, nothing to do.");
                this.isRefreshing.set(false);
                return Mono.empty();
            }
        });
    }

    private void startRefreshLocationTimerAsync() {
        startRefreshLocationTimerAsync(false).subscribe();
    }

    private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) {
        if (this.isClosed) {
            logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed");
            return Mono.empty();
        }

        logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS);
        LocalDateTime now = LocalDateTime.now();

        // Initialization runs immediately; later refreshes wait the configured interval.
        int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS;

        this.refreshInBackground.set(true);

        return Mono.delay(Duration.ofMillis(delayInMillis), CosmosSchedulers.COSMOS_PARALLEL)
            .flatMap(
                t -> {
                    if (this.isClosed) {
                        logger.info("client already closed");
                        return Mono.empty();
                    }

                    logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now);
                    Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint,
                        new ArrayList<>(this.getEffectivePreferredRegions()), this::getDatabaseAccountAsync);

                    return databaseAccountObs.flatMap(dbAccount -> {
                        logger.info("db account retrieved {}", dbAccount);
                        this.refreshInBackground.set(false);
                        return this.refreshLocationPrivateAsync(dbAccount);
                    });
                }).onErrorResume(ex -> {
                    logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. Exception: {}", ex.toString(), ex);
                    this.setLatestDatabaseRefreshError(ex);

                    // Re-arm the timer so a transient failure does not stop background refreshes.
                    this.startRefreshLocationTimerAsync();
                    return Mono.empty();
                }).subscribeOn(scheduler);
    }

    private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) {
        return this.owner.getDatabaseAccountFromEndpoint(serviceEndpoint)
            .doOnNext(databaseAccount -> {
                if(databaseAccount != null) {
                    // Account reassignment and error reset happen atomically under the
                    // write lock, paired with the readers' read lock.
                    this.databaseAccountWriteLock.lock();
                    try {
                        this.latestDatabaseAccount = databaseAccount;
                        this.setLatestDatabaseRefreshError(null);
                    } finally {
                        this.databaseAccountWriteLock.unlock();
                    }
                }

                logger.debug("account retrieved: {}", databaseAccount);
            }).single();
    }

    public boolean isClosed() {
        return this.isClosed;
    }

    public String getRegionName(URI locationEndpoint, OperationType operationType) {
        return this.locationCache.getRegionName(locationEndpoint, operationType);
    }

    public ConnectionPolicy getConnectionPolicy() {
        return this.connectionPolicy;
    }
}
This has been fixed in subsequent iterations of the change.
// NOTE(review): pre-fix snapshot — latestDatabaseAccount and locationCache are read
// here without the read lock, so the null-check and the cache read are not atomic
// with respect to a concurrent refresh.
private List<String> getEffectivePreferredRegions() {
    // Explicitly configured preferred regions always take precedence.
    if (this.connectionPolicy.getPreferredRegions() != null && !this.connectionPolicy.getPreferredRegions().isEmpty()) {
        return this.connectionPolicy.getPreferredRegions();
    }
    // No database account metadata retrieved yet — nothing to derive regions from.
    if (this.latestDatabaseAccount == null) {
        return Collections.emptyList();
    }
    // Delegate to the location cache for the effective preferred locations
    // (cache content is populated from database account reads elsewhere).
    return this.locationCache.getEffectivePreferredLocations();
}
// Post-fix version: the account/cache reads are taken under the shared read lock.
private List<String> getEffectivePreferredRegions() {
    // Explicitly configured preferred regions take precedence; connectionPolicy is a
    // final field, so no locking is needed for this branch.
    if (this.connectionPolicy.getPreferredRegions() != null && !this.connectionPolicy.getPreferredRegions().isEmpty()) {
        return this.connectionPolicy.getPreferredRegions();
    }
    // Take the read lock so the null-check on latestDatabaseAccount and the
    // locationCache read observe a consistent snapshot relative to writers that
    // update both under the paired write lock.
    this.databaseAccountReadLock.lock();
    try {
        if (this.latestDatabaseAccount == null) {
            return Collections.emptyList();
        }
        return this.locationCache.getEffectivePreferredLocations();
    } finally {
        this.databaseAccountReadLock.unlock();
    }
}
// NOTE(review): pre-fix snapshot of GlobalEndpointManager — latestDatabaseAccount and
// locationCache are updated and read without a shared read-write lock here; the fixed
// version introduces a ReentrantReadWriteLock around those accesses.
/**
 * Tracks the account's regional service endpoints and periodically refreshes them
 * from database account metadata, both on demand and on a background timer.
 */
class GlobalEndpointManager implements AutoCloseable {
    private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class);
    // NOTE(review): "theadFactory" looks like a typo for "threadFactory".
    private static final CosmosDaemonThreadFactory theadFactory = new CosmosDaemonThreadFactory("cosmos-global-endpoint-mgr");
    private final int backgroundRefreshLocationTimeIntervalInMS;
    private final LocationCache locationCache;
    private final URI defaultEndpoint;
    private final ConnectionPolicy connectionPolicy;
    private final Duration maxInitializationTime;
    private final DatabaseAccountManagerInternal owner;
    // Guards against overlapping non-forced refreshes.
    private final AtomicBoolean isRefreshing;
    // True while a timer-driven background refresh is scheduled/pending.
    private final AtomicBoolean refreshInBackground;
    private final Scheduler scheduler = Schedulers.newSingle(theadFactory);
    private volatile boolean isClosed;
    // NOTE(review): field appears unused in this snippet — confirm against the full file.
    private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true);
    // Last successfully retrieved account; only overwritten on non-null reads, so a
    // failed refresh keeps the previous valid value.
    private volatile DatabaseAccount latestDatabaseAccount;
    // Last refresh failure; cleared on a successful account read.
    private volatile Throwable latestDatabaseRefreshError;

    public void setLatestDatabaseRefreshError(Throwable latestDatabaseRefreshError) {
        this.latestDatabaseRefreshError = latestDatabaseRefreshError;
    }

    public Throwable getLatestDatabaseRefreshError() {
        return latestDatabaseRefreshError;
    }

    public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) {
        // The unavailable-location expiration time doubles as the background refresh interval.
        this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000;
        this.maxInitializationTime = Duration.ofSeconds(configs.getGlobalEndpointManagerMaxInitializationTimeInSeconds());
        try {
            this.locationCache = new LocationCache(
                connectionPolicy,
                owner.getServiceEndpoint(),
                configs);
            this.owner = owner;
            this.defaultEndpoint = owner.getServiceEndpoint();
            this.connectionPolicy = connectionPolicy;
            this.isRefreshing = new AtomicBoolean(false);
            this.refreshInBackground = new AtomicBoolean(false);
            this.isClosed = false;
        } catch (Exception e) {
            throw new IllegalArgumentException(e);
        }
    }

    // Blocks until the first account refresh completes, bounded by maxInitializationTime.
    public void init() {
        startRefreshLocationTimerAsync(true).block(maxInitializationTime);
    }

    public UnmodifiableList<URI> getReadEndpoints() {
        return this.locationCache.getReadEndpoints();
    }

    public UnmodifiableList<URI> getWriteEndpoints() {
        return this.locationCache.getWriteEndpoints();
    }

    public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) {
        return this.locationCache.getApplicableReadEndpoints(request);
    }

    public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) {
        return this.locationCache.getApplicableWriteEndpoints(request);
    }

    public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegions) {
        return this.locationCache.getApplicableReadEndpoints(excludedRegions, Collections.emptyList());
    }

    public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegions) {
        return this.locationCache.getApplicableWriteEndpoints(excludedRegions, Collections.emptyList());
    }

    public List<URI> getAvailableReadEndpoints() {
        return this.locationCache.getAvailableReadEndpoints();
    }

    public List<URI> getAvailableWriteEndpoints() {
        return this.locationCache.getAvailableWriteEndpoints();
    }

    // Tries the default (global) endpoint first; on failure, falls back to each regional
    // endpoint in order and returns the first successful database account read.
    public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync(
        URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) {
        return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume(
            e -> {
                logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage());
                if (locations.isEmpty()) {
                    return Mono.error(e);
                }

                Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size())
                    .map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux());

                // concatDelayError keeps trying later locations even if earlier ones fail.
                Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single();
                return res.doOnError(
                    innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage()));
            });
    }

    public URI resolveServiceEndpoint(RxDocumentServiceRequest request) {
        URI serviceEndpoint = this.locationCache.resolveServiceEndpoint(request);
        // Record the routed endpoint for fault-injection scenarios, when present.
        if (request.faultInjectionRequestContext != null) {
            request.faultInjectionRequestContext.setLocationEndpointToRoute(serviceEndpoint);
        }
        return serviceEndpoint;
    }

    public URI resolveFaultInjectionServiceEndpoint(String region, boolean writeOnly) {
        return this.locationCache.resolveFaultInjectionEndpoint(region, writeOnly);
    }

    public URI getDefaultEndpoint() {
        return this.locationCache.getDefaultEndpoint();
    }

    public void markEndpointUnavailableForRead(URI endpoint) {
        logger.debug("Marking endpoint {} unavailable for read",endpoint);
        this.locationCache.markEndpointUnavailableForRead(endpoint);; // NOTE(review): stray extra ';'
    }

    public void markEndpointUnavailableForWrite(URI endpoint) {
        logger.debug("Marking endpoint {} unavailable for Write",endpoint);
        this.locationCache.markEndpointUnavailableForWrite(endpoint);
    }

    public boolean canUseMultipleWriteLocations() {
        return this.locationCache.canUseMultipleWriteLocations();
    }

    public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) {
        return this.locationCache.canUseMultipleWriteLocations(request);
    }

    public void close() {
        this.isClosed = true;
        this.scheduler.dispose();
        logger.debug("GlobalEndpointManager closed.");
    }

    public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) {
        return Mono.defer(() -> {
            logger.debug("refreshLocationAsync() invoked");

            if (forceRefresh) {
                Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
                    this.defaultEndpoint,
                    new ArrayList<>(this.getEffectivePreferredRegions()),
                    this::getDatabaseAccountAsync);

                return databaseAccountObs.map(dbAccount -> {
                    // NOTE(review): pre-fix — locationCache updated without the write lock.
                    this.locationCache.onDatabaseAccountRead(dbAccount);
                    return dbAccount;
                }).flatMap(dbAccount -> {
                    return Mono.empty();
                });
            }

            // Only one non-forced refresh at a time; a concurrent caller is a no-op.
            if (!isRefreshing.compareAndSet(false, true)) {
                logger.debug("in the middle of another refresh. Not invoking a new refresh.");
                return Mono.empty();
            }

            logger.debug("will refresh");
            // On failure, release the refresh flag so future refreshes are not blocked.
            return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false));
        });
    }

    /**
     * This will provide the latest databaseAccount.
     * If due to some reason last databaseAccount update was null,
     * this method will return previous valid value
     * @return DatabaseAccount
     */
    public DatabaseAccount getLatestDatabaseAccount() {
        return this.latestDatabaseAccount;
    }

    public int getPreferredLocationCount() {
        List<String> effectivePreferredRegions = this.getEffectivePreferredRegions();
        return effectivePreferredRegions != null ? effectivePreferredRegions.size() : 0;
    }

    private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) {
        return Mono.defer(() -> {
            logger.debug("refreshLocationPrivateAsync() refreshing locations");

            if (databaseAccount != null) {
                this.locationCache.onDatabaseAccountRead(databaseAccount);
            }

            Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>();
            if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) {
                logger.debug("shouldRefreshEndpoints: true");

                if (databaseAccount == null && !canRefreshInBackground.v) {
                    logger.debug("shouldRefreshEndpoints: can't be done in background");

                    Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
                        this.defaultEndpoint,
                        new ArrayList<>(this.getEffectivePreferredRegions()),
                        this::getDatabaseAccountAsync);

                    return databaseAccountObs.map(dbAccount -> {
                        this.locationCache.onDatabaseAccountRead(dbAccount);
                        this.isRefreshing.set(false);
                        return dbAccount;
                    }).flatMap(dbAccount -> {
                        // Arm the background timer only if one is not already pending.
                        if (!this.refreshInBackground.get()) {
                            this.startRefreshLocationTimerAsync();
                        }
                        return Mono.empty();
                    });
                }

                if (!this.refreshInBackground.get()) {
                    this.startRefreshLocationTimerAsync();
                }
                this.isRefreshing.set(false);
                return Mono.empty();
            } else {
                logger.debug("shouldRefreshEndpoints: false, nothing to do.");
                this.isRefreshing.set(false);
                return Mono.empty();
            }
        });
    }

    private void startRefreshLocationTimerAsync() {
        startRefreshLocationTimerAsync(false).subscribe();
    }

    private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) {
        if (this.isClosed) {
            logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed");
            return Mono.empty();
        }

        logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS);
        LocalDateTime now = LocalDateTime.now();

        // Initialization runs immediately; later refreshes wait the configured interval.
        int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS;

        this.refreshInBackground.set(true);

        return Mono.delay(Duration.ofMillis(delayInMillis), CosmosSchedulers.COSMOS_PARALLEL)
            .flatMap(
                t -> {
                    if (this.isClosed) {
                        logger.info("client already closed");
                        return Mono.empty();
                    }

                    logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now);
                    Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint,
                        new ArrayList<>(this.getEffectivePreferredRegions()), this::getDatabaseAccountAsync);

                    return databaseAccountObs.flatMap(dbAccount -> {
                        logger.info("db account retrieved {}", dbAccount);
                        this.refreshInBackground.set(false);
                        return this.refreshLocationPrivateAsync(dbAccount);
                    });
                }).onErrorResume(ex -> {
                    logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. Exception: {}", ex.toString(), ex);
                    this.setLatestDatabaseRefreshError(ex);

                    // Re-arm the timer so a transient failure does not stop background refreshes.
                    this.startRefreshLocationTimerAsync();
                    return Mono.empty();
                }).subscribeOn(scheduler);
    }

    private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) {
        return this.owner.getDatabaseAccountFromEndpoint(serviceEndpoint)
            .doOnNext(databaseAccount -> {
                if(databaseAccount != null) {
                    // NOTE(review): pre-fix — this write is not coordinated with readers
                    // that consult latestDatabaseAccount together with locationCache.
                    this.latestDatabaseAccount = databaseAccount;
                    this.setLatestDatabaseRefreshError(null);
                }

                logger.debug("account retrieved: {}", databaseAccount);
            }).single();
    }

    public boolean isClosed() {
        return this.isClosed;
    }

    public String getRegionName(URI locationEndpoint, OperationType operationType) {
        return this.locationCache.getRegionName(locationEndpoint, operationType);
    }

    public ConnectionPolicy getConnectionPolicy() {
        return this.connectionPolicy;
    }
}
class GlobalEndpointManager implements AutoCloseable { private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private static final CosmosDaemonThreadFactory theadFactory = new CosmosDaemonThreadFactory("cosmos-global-endpoint-mgr"); private final int backgroundRefreshLocationTimeIntervalInMS; private final LocationCache locationCache; private final URI defaultEndpoint; private final ConnectionPolicy connectionPolicy; private final Duration maxInitializationTime; private final DatabaseAccountManagerInternal owner; private final AtomicBoolean isRefreshing; private final AtomicBoolean refreshInBackground; private final Scheduler scheduler = Schedulers.newSingle(theadFactory); private volatile boolean isClosed; private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true); private volatile DatabaseAccount latestDatabaseAccount; private final ReentrantReadWriteLock.WriteLock databaseAccountWriteLock; private final ReentrantReadWriteLock.ReadLock databaseAccountReadLock; private volatile Throwable latestDatabaseRefreshError; public void setLatestDatabaseRefreshError(Throwable latestDatabaseRefreshError) { this.latestDatabaseRefreshError = latestDatabaseRefreshError; } public Throwable getLatestDatabaseRefreshError() { return latestDatabaseRefreshError; } public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) { this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000; this.maxInitializationTime = Duration.ofSeconds(configs.getGlobalEndpointManagerMaxInitializationTimeInSeconds()); try { this.locationCache = new LocationCache( connectionPolicy, owner.getServiceEndpoint(), configs); this.owner = owner; this.defaultEndpoint = owner.getServiceEndpoint(); this.connectionPolicy = connectionPolicy; this.isRefreshing = new AtomicBoolean(false); this.refreshInBackground = new AtomicBoolean(false); 
this.isClosed = false; ReentrantReadWriteLock reentrantReadWriteLock = new ReentrantReadWriteLock(); this.databaseAccountWriteLock = reentrantReadWriteLock.writeLock(); this.databaseAccountReadLock = reentrantReadWriteLock.readLock(); } catch (Exception e) { throw new IllegalArgumentException(e); } } public void init() { startRefreshLocationTimerAsync(true).block(maxInitializationTime); } public UnmodifiableList<URI> getReadEndpoints() { return this.locationCache.getReadEndpoints(); } public UnmodifiableList<URI> getWriteEndpoints() { return this.locationCache.getWriteEndpoints(); } public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableReadEndpoints(request); } public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableWriteEndpoints(request); } public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableReadEndpoints(excludedRegions, Collections.emptyList()); } public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableWriteEndpoints(excludedRegions, Collections.emptyList()); } public List<URI> getAvailableReadEndpoints() { return this.locationCache.getAvailableReadEndpoints(); } public List<URI> getAvailableWriteEndpoints() { return this.locationCache.getAvailableWriteEndpoints(); } public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync( URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) { return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume( e -> { logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage()); if (locations.isEmpty()) { return Mono.error(e); } Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size()) .map(index -> 
getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux()); Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single(); return res.doOnError( innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage())); }); } public URI resolveServiceEndpoint(RxDocumentServiceRequest request) { URI serviceEndpoint = this.locationCache.resolveServiceEndpoint(request); if (request.faultInjectionRequestContext != null) { request.faultInjectionRequestContext.setLocationEndpointToRoute(serviceEndpoint); } return serviceEndpoint; } public URI resolveFaultInjectionServiceEndpoint(String region, boolean writeOnly) { return this.locationCache.resolveFaultInjectionEndpoint(region, writeOnly); } public URI getDefaultEndpoint() { return this.locationCache.getDefaultEndpoint(); } public void markEndpointUnavailableForRead(URI endpoint) { logger.debug("Marking endpoint {} unavailable for read",endpoint); this.locationCache.markEndpointUnavailableForRead(endpoint);; } public void markEndpointUnavailableForWrite(URI endpoint) { logger.debug("Marking endpoint {} unavailable for Write",endpoint); this.locationCache.markEndpointUnavailableForWrite(endpoint); } public boolean canUseMultipleWriteLocations() { return this.locationCache.canUseMultipleWriteLocations(); } public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) { return this.locationCache.canUseMultipleWriteLocations(request); } public void close() { this.isClosed = true; this.scheduler.dispose(); logger.debug("GlobalEndpointManager closed."); } public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) { return Mono.defer(() -> { logger.debug("refreshLocationAsync() invoked"); if (forceRefresh) { Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.getEffectivePreferredRegions()), 
this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.databaseAccountWriteLock.lock(); try { this.locationCache.onDatabaseAccountRead(dbAccount); } finally { this.databaseAccountWriteLock.unlock(); } return dbAccount; }).flatMap(dbAccount -> { return Mono.empty(); }); } if (!isRefreshing.compareAndSet(false, true)) { logger.debug("in the middle of another refresh. Not invoking a new refresh."); return Mono.empty(); } logger.debug("will refresh"); return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false)); }); } /** * This will provide the latest databaseAccount. * If due to some reason last databaseAccount update was null, * this method will return previous valid value * @return DatabaseAccount */ public DatabaseAccount getLatestDatabaseAccount() { return this.latestDatabaseAccount; } public int getPreferredLocationCount() { List<String> effectivePreferredRegions = this.getEffectivePreferredRegions(); return effectivePreferredRegions != null ? 
effectivePreferredRegions.size() : 0; } private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) { return Mono.defer(() -> { logger.debug("refreshLocationPrivateAsync() refreshing locations"); if (databaseAccount != null) { this.databaseAccountWriteLock.lock(); try { this.locationCache.onDatabaseAccountRead(databaseAccount); } finally { this.databaseAccountWriteLock.unlock(); } } Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>(); if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) { logger.debug("shouldRefreshEndpoints: true"); if (databaseAccount == null && !canRefreshInBackground.v) { logger.debug("shouldRefreshEndpoints: can't be done in background"); Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.getEffectivePreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.databaseAccountWriteLock.lock(); try { this.locationCache.onDatabaseAccountRead(dbAccount); } finally { this.databaseAccountWriteLock.unlock(); } this.isRefreshing.set(false); return dbAccount; }).flatMap(dbAccount -> { if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } return Mono.empty(); }); } if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } this.isRefreshing.set(false); return Mono.empty(); } else { logger.debug("shouldRefreshEndpoints: false, nothing to do."); this.isRefreshing.set(false); return Mono.empty(); } }); } private void startRefreshLocationTimerAsync() { startRefreshLocationTimerAsync(false).subscribe(); } private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) { if (this.isClosed) { logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed"); return Mono.empty(); } logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS); LocalDateTime now = 
LocalDateTime.now(); int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS; this.refreshInBackground.set(true); return Mono.delay(Duration.ofMillis(delayInMillis), CosmosSchedulers.COSMOS_PARALLEL) .flatMap( t -> { if (this.isClosed) { logger.info("client already closed"); return Mono.empty(); } logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now); Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint, new ArrayList<>(this.getEffectivePreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.flatMap(dbAccount -> { logger.info("db account retrieved {}", dbAccount); this.refreshInBackground.set(false); return this.refreshLocationPrivateAsync(dbAccount); }); }).onErrorResume(ex -> { logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. Exception: {}", ex.toString(), ex); this.setLatestDatabaseRefreshError(ex); this.startRefreshLocationTimerAsync(); return Mono.empty(); }).subscribeOn(scheduler); } private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) { return this.owner.getDatabaseAccountFromEndpoint(serviceEndpoint) .doOnNext(databaseAccount -> { if(databaseAccount != null) { this.databaseAccountWriteLock.lock(); try { this.latestDatabaseAccount = databaseAccount; this.setLatestDatabaseRefreshError(null); } finally { this.databaseAccountWriteLock.unlock(); } } logger.debug("account retrieved: {}", databaseAccount); }).single(); } public boolean isClosed() { return this.isClosed; } public String getRegionName(URI locationEndpoint, OperationType operationType) { return this.locationCache.getRegionName(locationEndpoint, operationType); } public ConnectionPolicy getConnectionPolicy() { return this.connectionPolicy; } }
Should we use `Mono.defer()` instead of `Mono.just()` here? We don't want to create the `InputStream` eagerly.
public Mono<InputStream> getBodyAsInputStream() { if (responseBody == null) { return Mono.empty(); } return Mono.just(responseBody.byteStream()); }
return Mono.just(responseBody.byteStream());
public Mono<InputStream> getBodyAsInputStream() { if (responseBody == null) { return Mono.empty(); } return Mono.fromSupplier(responseBody::byteStream); }
class OkHttpAsyncResponse extends OkHttpAsyncResponseBase { private static final int BYTE_BUFFER_CHUNK_SIZE = 8192; private final ResponseBody responseBody; /** * Creates an OkHttpAsyncResponse. * * @param response The OkHttp response. * @param request The request which generated the response. * @param eagerlyConvertHeaders Whether to eagerly convert the response headers. */ public OkHttpAsyncResponse(Response response, HttpRequest request, boolean eagerlyConvertHeaders) { super(response, request, eagerlyConvertHeaders); this.responseBody = response.body(); } @Override public BinaryData getBodyAsBinaryData() { return BinaryData.fromStream(this.responseBody.byteStream()); } @Override public Flux<ByteBuffer> getBody() { if (this.responseBody == null) { return Flux.empty(); } return Flux.using(this.responseBody::byteStream, bodyStream -> FluxUtil.toFluxByteBuffer(bodyStream, BYTE_BUFFER_CHUNK_SIZE), bodyStream -> this.close(), false); } @Override public Mono<byte[]> getBodyAsByteArray() { return Mono.fromCallable(() -> { if (responseBody == null) { return null; } byte[] content = responseBody.bytes(); if (content.length == 0) { return null; } return content; }); } @Override @Override public void writeBodyTo(WritableByteChannel channel) throws IOException { if (responseBody != null) { try { IOUtils.transfer(responseBody.source(), channel, responseBody.contentLength()); } finally { close(); } } } @Override public Mono<Void> writeBodyToAsync(AsynchronousByteChannel channel) { if (responseBody != null) { return Mono.using(() -> this, ignored -> IOUtils.transferAsync(responseBody.source(), channel, responseBody.contentLength()), OkHttpAsyncResponse::close); } else { return Mono.empty(); } } @Override public void close() { if (this.responseBody != null) { this.responseBody.close(); } } }
class OkHttpAsyncResponse extends OkHttpAsyncResponseBase { private static final int BYTE_BUFFER_CHUNK_SIZE = 8192; private final ResponseBody responseBody; /** * Creates an OkHttpAsyncResponse. * * @param response The OkHttp response. * @param request The request which generated the response. * @param eagerlyConvertHeaders Whether to eagerly convert the response headers. */ public OkHttpAsyncResponse(Response response, HttpRequest request, boolean eagerlyConvertHeaders) { super(response, request, eagerlyConvertHeaders); this.responseBody = response.body(); } @Override public BinaryData getBodyAsBinaryData() { return BinaryData.fromStream(this.responseBody.byteStream()); } @Override public Flux<ByteBuffer> getBody() { if (this.responseBody == null) { return Flux.empty(); } return Flux.using(this.responseBody::byteStream, bodyStream -> FluxUtil.toFluxByteBuffer(bodyStream, BYTE_BUFFER_CHUNK_SIZE), bodyStream -> this.close(), false); } @Override public Mono<byte[]> getBodyAsByteArray() { return Mono.fromCallable(() -> { if (responseBody == null) { return null; } byte[] content = responseBody.bytes(); if (content.length == 0) { return null; } return content; }); } @Override @Override public void writeBodyTo(WritableByteChannel channel) throws IOException { if (responseBody != null) { try { IOUtils.transfer(responseBody.source(), channel, responseBody.contentLength()); } finally { close(); } } } @Override public Mono<Void> writeBodyToAsync(AsynchronousByteChannel channel) { if (responseBody != null) { return Mono.using(() -> this, ignored -> IOUtils.transferAsync(responseBody.source(), channel, responseBody.contentLength()), OkHttpAsyncResponse::close); } else { return Mono.empty(); } } @Override public void close() { if (this.responseBody != null) { this.responseBody.close(); } } }
Can we convert this to a base64-encoded string here instead of having the private constructor?
public static EmbeddingItem fromJson(JsonReader jsonReader) throws IOException { return jsonReader.readObject(reader -> { String embedding = null; List<Double> embeddingInDouble = null; int promptIndex = 0; while (reader.nextToken() != JsonToken.END_OBJECT) { String fieldName = reader.getFieldName(); reader.nextToken(); if ("embedding".equals(fieldName)) { JsonToken jsonToken = reader.currentToken(); if (jsonToken == JsonToken.STRING) { embedding = reader.getString(); } else if (jsonToken == JsonToken.START_ARRAY) { embeddingInDouble = reader.readArray(JsonReader::getDouble); } else { throw new IllegalStateException("Unexpected 'embedding' type found when deserializing" + " EmbeddingItem JSON object: " + jsonToken); } } else if ("index".equals(fieldName)) { promptIndex = reader.getInt(); } else { reader.skipChildren(); } } return new EmbeddingItem(embedding, embeddingInDouble, promptIndex); }); }
embeddingInDouble = reader.readArray(JsonReader::getDouble);
public static EmbeddingItem fromJson(JsonReader jsonReader) throws IOException { return jsonReader.readObject(reader -> { String embedding = null; List<Float> embeddingInFloat = null; int promptIndex = 0; while (reader.nextToken() != JsonToken.END_OBJECT) { String fieldName = reader.getFieldName(); reader.nextToken(); if ("embedding".equals(fieldName)) { JsonToken jsonToken = reader.currentToken(); if (jsonToken == JsonToken.STRING) { embedding = reader.getString(); } else if (jsonToken == JsonToken.START_ARRAY) { embeddingInFloat = reader.readArray(JsonReader::getFloat); } else { throw new IllegalStateException("Unexpected 'embedding' type found when deserializing" + " EmbeddingItem JSON object: " + jsonToken); } } else if ("index".equals(fieldName)) { promptIndex = reader.getInt(); } else { reader.skipChildren(); } } EmbeddingItem embeddingItem = new EmbeddingItem(embedding, promptIndex); embeddingItem.embeddingInFloat = embeddingInFloat; return embeddingItem; }); }
class EmbeddingItem implements JsonSerializable<EmbeddingItem> { /* * List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. */ @Generated private final List<Double> embedding; private final String embeddingBase64; /** * Creates an instance of EmbeddingItem class. * * @param embeddingBase64 the embedding value to set. * @param promptIndex the promptIndex value to set. */ private EmbeddingItem(String embeddingBase64, List<Double> embedding, int promptIndex) { this.embeddingBase64 = embeddingBase64; this.promptIndex = promptIndex; this.embedding = embedding; } /** * Get the embedding property: List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. * * @return the embedding value. */ public List<Float> getEmbedding() { if (embedding != null) { return embedding.stream().map(Double::floatValue).collect(Collectors.toList()); } return convertBase64ToFloatList(embeddingBase64); } /** * Get the embedding property: List of embeddings value in base64 format for the input prompt. * * @return the embedding base64 encoded string. */ public String getEmbeddingAsString() { return embeddingBase64; } /* * Index of the prompt to which the EmbeddingItem corresponds. */ @Generated private final int promptIndex; /** * Get the promptIndex property: Index of the prompt to which the EmbeddingItem corresponds. * * @return the promptIndex value. 
*/ @Generated public int getPromptIndex() { return this.promptIndex; } /** * {@inheritDoc} */ @Override public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { jsonWriter.writeStartObject(); jsonWriter.writeStringField("object", "embedding"); jsonWriter.writeIntField("index", promptIndex); if (embeddingBase64 != null) { jsonWriter.writeStringField("embedding", embeddingBase64); } else if (embedding != null) { jsonWriter.writeArrayField("embedding", embedding, JsonWriter::writeDouble); } return jsonWriter.writeEndObject(); } /** * Reads an instance of EmbeddingItem from the JsonReader. * * @param jsonReader The JsonReader being read. * @return An instance of EmbeddingItem if the JsonReader was pointing to an instance of it, or null if it was * pointing to JSON null. * @throws IllegalStateException If the deserialized JSON object was missing any required properties. * @throws IOException If an error occurs while reading the EmbeddingItem. */ }
class EmbeddingItem implements JsonSerializable<EmbeddingItem> { /** * Get the embedding property: List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. * * @return the embedding value. */ public List<Float> getEmbedding() { if (embeddingInFloat == null && embeddingBase64 != null) { embeddingInFloat = convertBase64ToFloatList(embeddingBase64); } return embeddingInFloat; } /** * Get the embedding property: List of embeddings value in base64 format for the input prompt. * * @return the embedding base64 encoded string. */ public String getEmbeddingAsString() { if (embeddingBase64 == null && embeddingInFloat != null) { embeddingBase64 = convertFloatListToBase64(embeddingInFloat); } return embeddingBase64; } /* * Index of the prompt to which the EmbeddingItem corresponds. */ @Generated private final int promptIndex; /** * Get the promptIndex property: Index of the prompt to which the EmbeddingItem corresponds. * * @return the promptIndex value. */ @Generated public int getPromptIndex() { return this.promptIndex; } /** * {@inheritDoc} */ @Override public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { jsonWriter.writeStartObject(); jsonWriter.writeIntField("index", promptIndex); if (embeddingBase64 != null) { jsonWriter.writeStringField("embedding", embeddingBase64); } else if (embeddingInFloat != null) { jsonWriter.writeArrayField("embedding", embeddingInFloat, JsonWriter::writeFloat); } return jsonWriter.writeEndObject(); } /** * Reads an instance of EmbeddingItem from the JsonReader. * * @param jsonReader The JsonReader being read. * @return An instance of EmbeddingItem if the JsonReader was pointing to an instance of it, or null if it was * pointing to JSON null. * @throws IllegalStateException If the deserialized JSON object was missing any required properties. * @throws IOException If an error occurs while reading the EmbeddingItem. 
*/ /** * Creates an instance of EmbeddingItem class. * * @param embeddingBase64 the embedding value to set. * @param promptIndex the promptIndex value to set. */ private EmbeddingItem(String embeddingBase64, int promptIndex) { this.embeddingBase64 = embeddingBase64; this.promptIndex = promptIndex; } private List<Double> embedding; /* * List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. */ private List<Float> embeddingInFloat; /* * List of embeddings value in base64 format for the input prompt. */ private String embeddingBase64; }
We have to keep the private constructor to skip the TypeSpec validation. From this snapshot you can see that `List<Double>` is generated but `String` is not. Thus, I keep editing the private constructor and keep the generated annotation for the class field `List<Double> embedding` ![image](https://github.com/user-attachments/assets/86abdcd6-57ce-488b-98d9-47a4e30256fe) I added a helper method to convert the `List<Float>` to a base64-encoded string. Thus, when a user passes a list of floats, they will have the base64-encoded string as well.
public static EmbeddingItem fromJson(JsonReader jsonReader) throws IOException { return jsonReader.readObject(reader -> { String embedding = null; List<Double> embeddingInDouble = null; int promptIndex = 0; while (reader.nextToken() != JsonToken.END_OBJECT) { String fieldName = reader.getFieldName(); reader.nextToken(); if ("embedding".equals(fieldName)) { JsonToken jsonToken = reader.currentToken(); if (jsonToken == JsonToken.STRING) { embedding = reader.getString(); } else if (jsonToken == JsonToken.START_ARRAY) { embeddingInDouble = reader.readArray(JsonReader::getDouble); } else { throw new IllegalStateException("Unexpected 'embedding' type found when deserializing" + " EmbeddingItem JSON object: " + jsonToken); } } else if ("index".equals(fieldName)) { promptIndex = reader.getInt(); } else { reader.skipChildren(); } } return new EmbeddingItem(embedding, embeddingInDouble, promptIndex); }); }
embeddingInDouble = reader.readArray(JsonReader::getDouble);
public static EmbeddingItem fromJson(JsonReader jsonReader) throws IOException { return jsonReader.readObject(reader -> { String embedding = null; List<Float> embeddingInFloat = null; int promptIndex = 0; while (reader.nextToken() != JsonToken.END_OBJECT) { String fieldName = reader.getFieldName(); reader.nextToken(); if ("embedding".equals(fieldName)) { JsonToken jsonToken = reader.currentToken(); if (jsonToken == JsonToken.STRING) { embedding = reader.getString(); } else if (jsonToken == JsonToken.START_ARRAY) { embeddingInFloat = reader.readArray(JsonReader::getFloat); } else { throw new IllegalStateException("Unexpected 'embedding' type found when deserializing" + " EmbeddingItem JSON object: " + jsonToken); } } else if ("index".equals(fieldName)) { promptIndex = reader.getInt(); } else { reader.skipChildren(); } } EmbeddingItem embeddingItem = new EmbeddingItem(embedding, promptIndex); embeddingItem.embeddingInFloat = embeddingInFloat; return embeddingItem; }); }
class EmbeddingItem implements JsonSerializable<EmbeddingItem> { /* * List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. */ @Generated private final List<Double> embedding; private final String embeddingBase64; /** * Creates an instance of EmbeddingItem class. * * @param embeddingBase64 the embedding value to set. * @param promptIndex the promptIndex value to set. */ private EmbeddingItem(String embeddingBase64, List<Double> embedding, int promptIndex) { this.embeddingBase64 = embeddingBase64; this.promptIndex = promptIndex; this.embedding = embedding; } /** * Get the embedding property: List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. * * @return the embedding value. */ public List<Float> getEmbedding() { if (embedding != null) { return embedding.stream().map(Double::floatValue).collect(Collectors.toList()); } return convertBase64ToFloatList(embeddingBase64); } /** * Get the embedding property: List of embeddings value in base64 format for the input prompt. * * @return the embedding base64 encoded string. */ public String getEmbeddingAsString() { return embeddingBase64; } /* * Index of the prompt to which the EmbeddingItem corresponds. */ @Generated private final int promptIndex; /** * Get the promptIndex property: Index of the prompt to which the EmbeddingItem corresponds. * * @return the promptIndex value. 
*/ @Generated public int getPromptIndex() { return this.promptIndex; } /** * {@inheritDoc} */ @Override public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { jsonWriter.writeStartObject(); jsonWriter.writeStringField("object", "embedding"); jsonWriter.writeIntField("index", promptIndex); if (embeddingBase64 != null) { jsonWriter.writeStringField("embedding", embeddingBase64); } else if (embedding != null) { jsonWriter.writeArrayField("embedding", embedding, JsonWriter::writeDouble); } return jsonWriter.writeEndObject(); } /** * Reads an instance of EmbeddingItem from the JsonReader. * * @param jsonReader The JsonReader being read. * @return An instance of EmbeddingItem if the JsonReader was pointing to an instance of it, or null if it was * pointing to JSON null. * @throws IllegalStateException If the deserialized JSON object was missing any required properties. * @throws IOException If an error occurs while reading the EmbeddingItem. */ }
class EmbeddingItem implements JsonSerializable<EmbeddingItem> { /** * Get the embedding property: List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. * * @return the embedding value. */ public List<Float> getEmbedding() { if (embeddingInFloat == null && embeddingBase64 != null) { embeddingInFloat = convertBase64ToFloatList(embeddingBase64); } return embeddingInFloat; } /** * Get the embedding property: List of embeddings value in base64 format for the input prompt. * * @return the embedding base64 encoded string. */ public String getEmbeddingAsString() { if (embeddingBase64 == null && embeddingInFloat != null) { embeddingBase64 = convertFloatListToBase64(embeddingInFloat); } return embeddingBase64; } /* * Index of the prompt to which the EmbeddingItem corresponds. */ @Generated private final int promptIndex; /** * Get the promptIndex property: Index of the prompt to which the EmbeddingItem corresponds. * * @return the promptIndex value. */ @Generated public int getPromptIndex() { return this.promptIndex; } /** * {@inheritDoc} */ @Override public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { jsonWriter.writeStartObject(); jsonWriter.writeIntField("index", promptIndex); if (embeddingBase64 != null) { jsonWriter.writeStringField("embedding", embeddingBase64); } else if (embeddingInFloat != null) { jsonWriter.writeArrayField("embedding", embeddingInFloat, JsonWriter::writeFloat); } return jsonWriter.writeEndObject(); } /** * Reads an instance of EmbeddingItem from the JsonReader. * * @param jsonReader The JsonReader being read. * @return An instance of EmbeddingItem if the JsonReader was pointing to an instance of it, or null if it was * pointing to JSON null. * @throws IllegalStateException If the deserialized JSON object was missing any required properties. * @throws IOException If an error occurs while reading the EmbeddingItem. 
*/ /** * Creates an instance of EmbeddingItem class. * * @param embeddingBase64 the embedding value to set. * @param promptIndex the promptIndex value to set. */ private EmbeddingItem(String embeddingBase64, int promptIndex) { this.embeddingBase64 = embeddingBase64; this.promptIndex = promptIndex; } private List<Double> embedding; /* * List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. */ private List<Float> embeddingInFloat; /* * List of embeddings value in base64 format for the input prompt. */ private String embeddingBase64; }
We don't want to perform the conversion every time the `getEmbedding` method is called. ```suggestion embedding = convertBase64ToFloatList(embeddingBase64); return embedding; ```
public List<Float> getEmbedding() { if (embedding != null) { return embedding.stream().map(Double::floatValue).collect(Collectors.toList()); } return convertBase64ToFloatList(embeddingBase64); }
return convertBase64ToFloatList(embeddingBase64);
public List<Float> getEmbedding() { if (embeddingInFloat == null && embeddingBase64 != null) { embeddingInFloat = convertBase64ToFloatList(embeddingBase64); } return embeddingInFloat; }
class EmbeddingItem implements JsonSerializable<EmbeddingItem> { /* * List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. */ @Generated private final List<Double> embedding; private final String embeddingBase64; /** * Creates an instance of EmbeddingItem class. * * @param embeddingBase64 the embedding value to set. * @param promptIndex the promptIndex value to set. */ private EmbeddingItem(String embeddingBase64, List<Double> embedding, int promptIndex) { this.embeddingBase64 = embeddingBase64; this.promptIndex = promptIndex; this.embedding = embedding; } /** * Get the embedding property: List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. * * @return the embedding value. */ /** * Get the embedding property: List of embeddings value in base64 format for the input prompt. * * @return the embedding base64 encoded string. */ public String getEmbeddingAsString() { if (embeddingBase64 != null) { return embeddingBase64; } return convertFloatListToBase64(embedding.stream().map(Double::floatValue).collect(Collectors.toList())); } /* * Index of the prompt to which the EmbeddingItem corresponds. */ @Generated private final int promptIndex; /** * Get the promptIndex property: Index of the prompt to which the EmbeddingItem corresponds. * * @return the promptIndex value. 
*/ @Generated public int getPromptIndex() { return this.promptIndex; } /** * {@inheritDoc} */ @Override public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { jsonWriter.writeStartObject(); jsonWriter.writeStringField("object", "embedding"); jsonWriter.writeIntField("index", promptIndex); if (embeddingBase64 != null) { jsonWriter.writeStringField("embedding", embeddingBase64); } else if (embedding != null) { jsonWriter.writeArrayField("embedding", embedding, JsonWriter::writeDouble); } return jsonWriter.writeEndObject(); } /** * Reads an instance of EmbeddingItem from the JsonReader. * * @param jsonReader The JsonReader being read. * @return An instance of EmbeddingItem if the JsonReader was pointing to an instance of it, or null if it was * pointing to JSON null. * @throws IllegalStateException If the deserialized JSON object was missing any required properties. * @throws IOException If an error occurs while reading the EmbeddingItem. */ public static EmbeddingItem fromJson(JsonReader jsonReader) throws IOException { return jsonReader.readObject(reader -> { String embedding = null; List<Double> embeddingInDouble = null; int promptIndex = 0; while (reader.nextToken() != JsonToken.END_OBJECT) { String fieldName = reader.getFieldName(); reader.nextToken(); if ("embedding".equals(fieldName)) { JsonToken jsonToken = reader.currentToken(); if (jsonToken == JsonToken.STRING) { embedding = reader.getString(); } else if (jsonToken == JsonToken.START_ARRAY) { embeddingInDouble = reader.readArray(JsonReader::getDouble); } else { throw new IllegalStateException("Unexpected 'embedding' type found when deserializing" + " EmbeddingItem JSON object: " + jsonToken); } } else if ("index".equals(fieldName)) { promptIndex = reader.getInt(); } else { reader.skipChildren(); } } return new EmbeddingItem(embedding, embeddingInDouble, promptIndex); }); } }
class EmbeddingItem implements JsonSerializable<EmbeddingItem> { /** * Get the embedding property: List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. * * @return the embedding value. */ /** * Get the embedding property: List of embeddings value in base64 format for the input prompt. * * @return the embedding base64 encoded string. */ public String getEmbeddingAsString() { if (embeddingBase64 == null && embeddingInFloat != null) { embeddingBase64 = convertFloatListToBase64(embeddingInFloat); } return embeddingBase64; } /* * Index of the prompt to which the EmbeddingItem corresponds. */ @Generated private final int promptIndex; /** * Get the promptIndex property: Index of the prompt to which the EmbeddingItem corresponds. * * @return the promptIndex value. */ @Generated public int getPromptIndex() { return this.promptIndex; } /** * {@inheritDoc} */ @Override public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { jsonWriter.writeStartObject(); jsonWriter.writeIntField("index", promptIndex); if (embeddingBase64 != null) { jsonWriter.writeStringField("embedding", embeddingBase64); } else if (embeddingInFloat != null) { jsonWriter.writeArrayField("embedding", embeddingInFloat, JsonWriter::writeFloat); } return jsonWriter.writeEndObject(); } /** * Reads an instance of EmbeddingItem from the JsonReader. * * @param jsonReader The JsonReader being read. * @return An instance of EmbeddingItem if the JsonReader was pointing to an instance of it, or null if it was * pointing to JSON null. * @throws IllegalStateException If the deserialized JSON object was missing any required properties. * @throws IOException If an error occurs while reading the EmbeddingItem. 
*/ public static EmbeddingItem fromJson(JsonReader jsonReader) throws IOException { return jsonReader.readObject(reader -> { String embedding = null; List<Float> embeddingInFloat = null; int promptIndex = 0; while (reader.nextToken() != JsonToken.END_OBJECT) { String fieldName = reader.getFieldName(); reader.nextToken(); if ("embedding".equals(fieldName)) { JsonToken jsonToken = reader.currentToken(); if (jsonToken == JsonToken.STRING) { embedding = reader.getString(); } else if (jsonToken == JsonToken.START_ARRAY) { embeddingInFloat = reader.readArray(JsonReader::getFloat); } else { throw new IllegalStateException("Unexpected 'embedding' type found when deserializing" + " EmbeddingItem JSON object: " + jsonToken); } } else if ("index".equals(fieldName)) { promptIndex = reader.getInt(); } else { reader.skipChildren(); } } EmbeddingItem embeddingItem = new EmbeddingItem(embedding, promptIndex); embeddingItem.embeddingInFloat = embeddingInFloat; return embeddingItem; }); } /** * Creates an instance of EmbeddingItem class. * * @param embeddingBase64 the embedding value to set. * @param promptIndex the promptIndex value to set. */ private EmbeddingItem(String embeddingBase64, int promptIndex) { this.embeddingBase64 = embeddingBase64; this.promptIndex = promptIndex; } private List<Double> embedding; /* * List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. */ private List<Float> embeddingInFloat; /* * List of embeddings value in base64 format for the input prompt. */ private String embeddingBase64; }
Same here - we should convert only once. ```suggestion embeddingBase64 = convertFloatListToBase64(embedding.stream().map(Double::floatValue).collect(Collectors.toList())); return embeddingBase64; ```
public String getEmbeddingAsString() { if (embeddingBase64 != null) { return embeddingBase64; } return convertFloatListToBase64(embedding.stream().map(Double::floatValue).collect(Collectors.toList())); }
return convertFloatListToBase64(embedding.stream().map(Double::floatValue).collect(Collectors.toList()));
public String getEmbeddingAsString() { if (embeddingBase64 == null && embeddingInFloat != null) { embeddingBase64 = convertFloatListToBase64(embeddingInFloat); } return embeddingBase64; }
class EmbeddingItem implements JsonSerializable<EmbeddingItem> { /* * List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. */ @Generated private final List<Double> embedding; private final String embeddingBase64; /** * Creates an instance of EmbeddingItem class. * * @param embeddingBase64 the embedding value to set. * @param promptIndex the promptIndex value to set. */ private EmbeddingItem(String embeddingBase64, List<Double> embedding, int promptIndex) { this.embeddingBase64 = embeddingBase64; this.promptIndex = promptIndex; this.embedding = embedding; } /** * Get the embedding property: List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. * * @return the embedding value. */ public List<Float> getEmbedding() { if (embedding != null) { return embedding.stream().map(Double::floatValue).collect(Collectors.toList()); } return convertBase64ToFloatList(embeddingBase64); } /** * Get the embedding property: List of embeddings value in base64 format for the input prompt. * * @return the embedding base64 encoded string. */ /* * Index of the prompt to which the EmbeddingItem corresponds. */ @Generated private final int promptIndex; /** * Get the promptIndex property: Index of the prompt to which the EmbeddingItem corresponds. * * @return the promptIndex value. 
*/ @Generated public int getPromptIndex() { return this.promptIndex; } /** * {@inheritDoc} */ @Override public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { jsonWriter.writeStartObject(); jsonWriter.writeStringField("object", "embedding"); jsonWriter.writeIntField("index", promptIndex); if (embeddingBase64 != null) { jsonWriter.writeStringField("embedding", embeddingBase64); } else if (embedding != null) { jsonWriter.writeArrayField("embedding", embedding, JsonWriter::writeDouble); } return jsonWriter.writeEndObject(); } /** * Reads an instance of EmbeddingItem from the JsonReader. * * @param jsonReader The JsonReader being read. * @return An instance of EmbeddingItem if the JsonReader was pointing to an instance of it, or null if it was * pointing to JSON null. * @throws IllegalStateException If the deserialized JSON object was missing any required properties. * @throws IOException If an error occurs while reading the EmbeddingItem. */ public static EmbeddingItem fromJson(JsonReader jsonReader) throws IOException { return jsonReader.readObject(reader -> { String embedding = null; List<Double> embeddingInDouble = null; int promptIndex = 0; while (reader.nextToken() != JsonToken.END_OBJECT) { String fieldName = reader.getFieldName(); reader.nextToken(); if ("embedding".equals(fieldName)) { JsonToken jsonToken = reader.currentToken(); if (jsonToken == JsonToken.STRING) { embedding = reader.getString(); } else if (jsonToken == JsonToken.START_ARRAY) { embeddingInDouble = reader.readArray(JsonReader::getDouble); } else { throw new IllegalStateException("Unexpected 'embedding' type found when deserializing" + " EmbeddingItem JSON object: " + jsonToken); } } else if ("index".equals(fieldName)) { promptIndex = reader.getInt(); } else { reader.skipChildren(); } } return new EmbeddingItem(embedding, embeddingInDouble, promptIndex); }); } }
class EmbeddingItem implements JsonSerializable<EmbeddingItem> { /** * Get the embedding property: List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. * * @return the embedding value. */ public List<Float> getEmbedding() { if (embeddingInFloat == null && embeddingBase64 != null) { embeddingInFloat = convertBase64ToFloatList(embeddingBase64); } return embeddingInFloat; } /** * Get the embedding property: List of embeddings value in base64 format for the input prompt. * * @return the embedding base64 encoded string. */ /* * Index of the prompt to which the EmbeddingItem corresponds. */ @Generated private final int promptIndex; /** * Get the promptIndex property: Index of the prompt to which the EmbeddingItem corresponds. * * @return the promptIndex value. */ @Generated public int getPromptIndex() { return this.promptIndex; } /** * {@inheritDoc} */ @Override public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { jsonWriter.writeStartObject(); jsonWriter.writeIntField("index", promptIndex); if (embeddingBase64 != null) { jsonWriter.writeStringField("embedding", embeddingBase64); } else if (embeddingInFloat != null) { jsonWriter.writeArrayField("embedding", embeddingInFloat, JsonWriter::writeFloat); } return jsonWriter.writeEndObject(); } /** * Reads an instance of EmbeddingItem from the JsonReader. * * @param jsonReader The JsonReader being read. * @return An instance of EmbeddingItem if the JsonReader was pointing to an instance of it, or null if it was * pointing to JSON null. * @throws IllegalStateException If the deserialized JSON object was missing any required properties. * @throws IOException If an error occurs while reading the EmbeddingItem. 
*/ public static EmbeddingItem fromJson(JsonReader jsonReader) throws IOException { return jsonReader.readObject(reader -> { String embedding = null; List<Float> embeddingInFloat = null; int promptIndex = 0; while (reader.nextToken() != JsonToken.END_OBJECT) { String fieldName = reader.getFieldName(); reader.nextToken(); if ("embedding".equals(fieldName)) { JsonToken jsonToken = reader.currentToken(); if (jsonToken == JsonToken.STRING) { embedding = reader.getString(); } else if (jsonToken == JsonToken.START_ARRAY) { embeddingInFloat = reader.readArray(JsonReader::getFloat); } else { throw new IllegalStateException("Unexpected 'embedding' type found when deserializing" + " EmbeddingItem JSON object: " + jsonToken); } } else if ("index".equals(fieldName)) { promptIndex = reader.getInt(); } else { reader.skipChildren(); } } EmbeddingItem embeddingItem = new EmbeddingItem(embedding, promptIndex); embeddingItem.embeddingInFloat = embeddingInFloat; return embeddingItem; }); } /** * Creates an instance of EmbeddingItem class. * * @param embeddingBase64 the embedding value to set. * @param promptIndex the promptIndex value to set. */ private EmbeddingItem(String embeddingBase64, int promptIndex) { this.embeddingBase64 = embeddingBase64; this.promptIndex = promptIndex; } private List<Double> embedding; /* * List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. */ private List<Float> embeddingInFloat; /* * List of embeddings value in base64 format for the input prompt. */ private String embeddingBase64; }
Why is this added?
public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { jsonWriter.writeStartObject(); jsonWriter.writeStringField("object", "embedding"); jsonWriter.writeIntField("index", promptIndex); if (embeddingBase64 != null) { jsonWriter.writeStringField("embedding", embeddingBase64); } else if (embedding != null) { jsonWriter.writeArrayField("embedding", embedding, JsonWriter::writeDouble); } return jsonWriter.writeEndObject(); }
jsonWriter.writeStringField("object", "embedding");
public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { jsonWriter.writeStartObject(); jsonWriter.writeIntField("index", promptIndex); if (embeddingBase64 != null) { jsonWriter.writeStringField("embedding", embeddingBase64); } else if (embeddingInFloat != null) { jsonWriter.writeArrayField("embedding", embeddingInFloat, JsonWriter::writeFloat); } return jsonWriter.writeEndObject(); }
class EmbeddingItem implements JsonSerializable<EmbeddingItem> { /* * List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. */ @Generated private final List<Double> embedding; private final String embeddingBase64; /** * Creates an instance of EmbeddingItem class. * * @param embeddingBase64 the embedding value to set. * @param promptIndex the promptIndex value to set. */ private EmbeddingItem(String embeddingBase64, List<Double> embedding, int promptIndex) { this.embeddingBase64 = embeddingBase64; this.promptIndex = promptIndex; this.embedding = embedding; } /** * Get the embedding property: List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. * * @return the embedding value. */ public List<Float> getEmbedding() { if (embedding != null) { return embedding.stream().map(Double::floatValue).collect(Collectors.toList()); } return convertBase64ToFloatList(embeddingBase64); } /** * Get the embedding property: List of embeddings value in base64 format for the input prompt. * * @return the embedding base64 encoded string. */ public String getEmbeddingAsString() { if (embeddingBase64 != null) { return embeddingBase64; } return convertFloatListToBase64(embedding.stream().map(Double::floatValue).collect(Collectors.toList())); } /* * Index of the prompt to which the EmbeddingItem corresponds. */ @Generated private final int promptIndex; /** * Get the promptIndex property: Index of the prompt to which the EmbeddingItem corresponds. * * @return the promptIndex value. */ @Generated public int getPromptIndex() { return this.promptIndex; } /** * {@inheritDoc} */ @Override /** * Reads an instance of EmbeddingItem from the JsonReader. * * @param jsonReader The JsonReader being read. * @return An instance of EmbeddingItem if the JsonReader was pointing to an instance of it, or null if it was * pointing to JSON null. 
* @throws IllegalStateException If the deserialized JSON object was missing any required properties. * @throws IOException If an error occurs while reading the EmbeddingItem. */ public static EmbeddingItem fromJson(JsonReader jsonReader) throws IOException { return jsonReader.readObject(reader -> { String embedding = null; List<Double> embeddingInDouble = null; int promptIndex = 0; while (reader.nextToken() != JsonToken.END_OBJECT) { String fieldName = reader.getFieldName(); reader.nextToken(); if ("embedding".equals(fieldName)) { JsonToken jsonToken = reader.currentToken(); if (jsonToken == JsonToken.STRING) { embedding = reader.getString(); } else if (jsonToken == JsonToken.START_ARRAY) { embeddingInDouble = reader.readArray(JsonReader::getDouble); } else { throw new IllegalStateException("Unexpected 'embedding' type found when deserializing" + " EmbeddingItem JSON object: " + jsonToken); } } else if ("index".equals(fieldName)) { promptIndex = reader.getInt(); } else { reader.skipChildren(); } } return new EmbeddingItem(embedding, embeddingInDouble, promptIndex); }); } }
class EmbeddingItem implements JsonSerializable<EmbeddingItem> { /** * Get the embedding property: List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. * * @return the embedding value. */ public List<Float> getEmbedding() { if (embeddingInFloat == null && embeddingBase64 != null) { embeddingInFloat = convertBase64ToFloatList(embeddingBase64); } return embeddingInFloat; } /** * Get the embedding property: List of embeddings value in base64 format for the input prompt. * * @return the embedding base64 encoded string. */ public String getEmbeddingAsString() { if (embeddingBase64 == null && embeddingInFloat != null) { embeddingBase64 = convertFloatListToBase64(embeddingInFloat); } return embeddingBase64; } /* * Index of the prompt to which the EmbeddingItem corresponds. */ @Generated private final int promptIndex; /** * Get the promptIndex property: Index of the prompt to which the EmbeddingItem corresponds. * * @return the promptIndex value. */ @Generated public int getPromptIndex() { return this.promptIndex; } /** * {@inheritDoc} */ @Override /** * Reads an instance of EmbeddingItem from the JsonReader. * * @param jsonReader The JsonReader being read. * @return An instance of EmbeddingItem if the JsonReader was pointing to an instance of it, or null if it was * pointing to JSON null. * @throws IllegalStateException If the deserialized JSON object was missing any required properties. * @throws IOException If an error occurs while reading the EmbeddingItem. 
*/ public static EmbeddingItem fromJson(JsonReader jsonReader) throws IOException { return jsonReader.readObject(reader -> { String embedding = null; List<Float> embeddingInFloat = null; int promptIndex = 0; while (reader.nextToken() != JsonToken.END_OBJECT) { String fieldName = reader.getFieldName(); reader.nextToken(); if ("embedding".equals(fieldName)) { JsonToken jsonToken = reader.currentToken(); if (jsonToken == JsonToken.STRING) { embedding = reader.getString(); } else if (jsonToken == JsonToken.START_ARRAY) { embeddingInFloat = reader.readArray(JsonReader::getFloat); } else { throw new IllegalStateException("Unexpected 'embedding' type found when deserializing" + " EmbeddingItem JSON object: " + jsonToken); } } else if ("index".equals(fieldName)) { promptIndex = reader.getInt(); } else { reader.skipChildren(); } } EmbeddingItem embeddingItem = new EmbeddingItem(embedding, promptIndex); embeddingItem.embeddingInFloat = embeddingInFloat; return embeddingItem; }); } /** * Creates an instance of EmbeddingItem class. * * @param embeddingBase64 the embedding value to set. * @param promptIndex the promptIndex value to set. */ private EmbeddingItem(String embeddingBase64, int promptIndex) { this.embeddingBase64 = embeddingBase64; this.promptIndex = promptIndex; } private List<Double> embedding; /* * List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. */ private List<Float> embeddingInFloat; /* * List of embeddings value in base64 format for the input prompt. */ private String embeddingBase64; }
(1) OpenAI's payload has this field "object": "embedding" image [API Reference - OpenAI API](https://platform.openai.com/docs/api-reference/embeddings/object) (2) Generated code has used the same pattern: image [[OpenAI-Inference] Inference SDK support for Azure OpenAI `v2024-07-01-preview` service release by jpalvarezl · Pull Request #41210 · Azure/azure-sdk-for-java (github.com)](https://github.com/Azure/azure-sdk-for-java/pull/41210/files#diff-1158b0e826e3bd10ddd27567b1fa563dfda502395a7a52d02a95f56575a7b0b2R25)
public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { jsonWriter.writeStartObject(); jsonWriter.writeStringField("object", "embedding"); jsonWriter.writeIntField("index", promptIndex); if (embeddingBase64 != null) { jsonWriter.writeStringField("embedding", embeddingBase64); } else if (embedding != null) { jsonWriter.writeArrayField("embedding", embedding, JsonWriter::writeDouble); } return jsonWriter.writeEndObject(); }
jsonWriter.writeStringField("object", "embedding");
public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { jsonWriter.writeStartObject(); jsonWriter.writeIntField("index", promptIndex); if (embeddingBase64 != null) { jsonWriter.writeStringField("embedding", embeddingBase64); } else if (embeddingInFloat != null) { jsonWriter.writeArrayField("embedding", embeddingInFloat, JsonWriter::writeFloat); } return jsonWriter.writeEndObject(); }
class EmbeddingItem implements JsonSerializable<EmbeddingItem> { /* * List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. */ @Generated private final List<Double> embedding; private final String embeddingBase64; /** * Creates an instance of EmbeddingItem class. * * @param embeddingBase64 the embedding value to set. * @param promptIndex the promptIndex value to set. */ private EmbeddingItem(String embeddingBase64, List<Double> embedding, int promptIndex) { this.embeddingBase64 = embeddingBase64; this.promptIndex = promptIndex; this.embedding = embedding; } /** * Get the embedding property: List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. * * @return the embedding value. */ public List<Float> getEmbedding() { if (embedding != null) { return embedding.stream().map(Double::floatValue).collect(Collectors.toList()); } return convertBase64ToFloatList(embeddingBase64); } /** * Get the embedding property: List of embeddings value in base64 format for the input prompt. * * @return the embedding base64 encoded string. */ public String getEmbeddingAsString() { if (embeddingBase64 != null) { return embeddingBase64; } return convertFloatListToBase64(embedding.stream().map(Double::floatValue).collect(Collectors.toList())); } /* * Index of the prompt to which the EmbeddingItem corresponds. */ @Generated private final int promptIndex; /** * Get the promptIndex property: Index of the prompt to which the EmbeddingItem corresponds. * * @return the promptIndex value. */ @Generated public int getPromptIndex() { return this.promptIndex; } /** * {@inheritDoc} */ @Override /** * Reads an instance of EmbeddingItem from the JsonReader. * * @param jsonReader The JsonReader being read. * @return An instance of EmbeddingItem if the JsonReader was pointing to an instance of it, or null if it was * pointing to JSON null. 
* @throws IllegalStateException If the deserialized JSON object was missing any required properties. * @throws IOException If an error occurs while reading the EmbeddingItem. */ public static EmbeddingItem fromJson(JsonReader jsonReader) throws IOException { return jsonReader.readObject(reader -> { String embedding = null; List<Double> embeddingInDouble = null; int promptIndex = 0; while (reader.nextToken() != JsonToken.END_OBJECT) { String fieldName = reader.getFieldName(); reader.nextToken(); if ("embedding".equals(fieldName)) { JsonToken jsonToken = reader.currentToken(); if (jsonToken == JsonToken.STRING) { embedding = reader.getString(); } else if (jsonToken == JsonToken.START_ARRAY) { embeddingInDouble = reader.readArray(JsonReader::getDouble); } else { throw new IllegalStateException("Unexpected 'embedding' type found when deserializing" + " EmbeddingItem JSON object: " + jsonToken); } } else if ("index".equals(fieldName)) { promptIndex = reader.getInt(); } else { reader.skipChildren(); } } return new EmbeddingItem(embedding, embeddingInDouble, promptIndex); }); } }
class EmbeddingItem implements JsonSerializable<EmbeddingItem> { /** * Get the embedding property: List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. * * @return the embedding value. */ public List<Float> getEmbedding() { if (embeddingInFloat == null && embeddingBase64 != null) { embeddingInFloat = convertBase64ToFloatList(embeddingBase64); } return embeddingInFloat; } /** * Get the embedding property: List of embeddings value in base64 format for the input prompt. * * @return the embedding base64 encoded string. */ public String getEmbeddingAsString() { if (embeddingBase64 == null && embeddingInFloat != null) { embeddingBase64 = convertFloatListToBase64(embeddingInFloat); } return embeddingBase64; } /* * Index of the prompt to which the EmbeddingItem corresponds. */ @Generated private final int promptIndex; /** * Get the promptIndex property: Index of the prompt to which the EmbeddingItem corresponds. * * @return the promptIndex value. */ @Generated public int getPromptIndex() { return this.promptIndex; } /** * {@inheritDoc} */ @Override /** * Reads an instance of EmbeddingItem from the JsonReader. * * @param jsonReader The JsonReader being read. * @return An instance of EmbeddingItem if the JsonReader was pointing to an instance of it, or null if it was * pointing to JSON null. * @throws IllegalStateException If the deserialized JSON object was missing any required properties. * @throws IOException If an error occurs while reading the EmbeddingItem. 
*/ public static EmbeddingItem fromJson(JsonReader jsonReader) throws IOException { return jsonReader.readObject(reader -> { String embedding = null; List<Float> embeddingInFloat = null; int promptIndex = 0; while (reader.nextToken() != JsonToken.END_OBJECT) { String fieldName = reader.getFieldName(); reader.nextToken(); if ("embedding".equals(fieldName)) { JsonToken jsonToken = reader.currentToken(); if (jsonToken == JsonToken.STRING) { embedding = reader.getString(); } else if (jsonToken == JsonToken.START_ARRAY) { embeddingInFloat = reader.readArray(JsonReader::getFloat); } else { throw new IllegalStateException("Unexpected 'embedding' type found when deserializing" + " EmbeddingItem JSON object: " + jsonToken); } } else if ("index".equals(fieldName)) { promptIndex = reader.getInt(); } else { reader.skipChildren(); } } EmbeddingItem embeddingItem = new EmbeddingItem(embedding, promptIndex); embeddingItem.embeddingInFloat = embeddingInFloat; return embeddingItem; }); } /** * Creates an instance of EmbeddingItem class. * * @param embeddingBase64 the embedding value to set. * @param promptIndex the promptIndex value to set. */ private EmbeddingItem(String embeddingBase64, int promptIndex) { this.embeddingBase64 = embeddingBase64; this.promptIndex = promptIndex; } private List<Double> embedding; /* * List of embeddings value for the input prompt. These represent a measurement of the * vector-based relatedness of the provided input. */ private List<Float> embeddingInFloat; /* * List of embeddings value in base64 format for the input prompt. */ private String embeddingBase64; }
Could we add some comments here, the logic is a little bit complicated
public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException { if (bean instanceof BindingServiceProperties bindingServiceProperties) { if (bindingServiceProperties.getBinders().isEmpty()) { String defaultBinder = bindingServiceProperties.getDefaultBinder(); if (!StringUtils.hasText(defaultBinder) || DEFAULT_KAFKA_BINDER_NAME.equalsIgnoreCase(defaultBinder)) { BinderTypeRegistry binderTypeRegistry = applicationContext.getBean(BinderTypeRegistry.class); Map<String, BinderType> allBinders = binderTypeRegistry.getAll(); if (allBinders.isEmpty() || (allBinders.containsKey(DEFAULT_KAFKA_BINDER_NAME) && allBinders.size() == 1)) { Map<String, Object> environment = new HashMap<>(); Map<String, Object> springMainPropertiesMap = getOrCreateSpringMainPropertiesMap(environment); configureSpringMainSources(springMainPropertiesMap); BinderProperties defaultKafkaBinder = new BinderProperties(); defaultKafkaBinder.setEnvironment(environment); Map<String, BinderProperties> binders = new HashMap<>(); binders.put(DEFAULT_KAFKA_BINDER_NAME, defaultKafkaBinder); bindingServiceProperties.setBinders(binders); } } } else { for (Map.Entry<String, BinderProperties> entry : bindingServiceProperties.getBinders().entrySet()) { if (entry.getKey() != null && entry.getValue() != null) { boolean isBinderTypeKafka = KAFKA_BINDER_TYPE.equalsIgnoreCase(entry.getValue().getType()); boolean isBinderNameKafka = DEFAULT_KAFKA_BINDER_NAME.equalsIgnoreCase(entry.getKey()); if (isBinderTypeKafka || isBinderNameKafka) { Map<String, Object> environment = entry.getValue().getEnvironment(); Map<String, Object> springMainPropertiesMap = getOrCreateSpringMainPropertiesMap(environment); configureSpringMainSources(springMainPropertiesMap); } } } } } return bean; }
}
public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException { if (bean instanceof BindingServiceProperties bindingServiceProperties) { if (bindingServiceProperties.getBinders().isEmpty()) { String defaultBinder = bindingServiceProperties.getDefaultBinder(); if (!StringUtils.hasText(defaultBinder) || DEFAULT_KAFKA_BINDER_NAME.equalsIgnoreCase(defaultBinder)) { BinderTypeRegistry binderTypeRegistry = applicationContext.getBean(BinderTypeRegistry.class); Map<String, BinderType> allBinders = binderTypeRegistry.getAll(); if (allBinders != null && allBinders.containsKey(DEFAULT_KAFKA_BINDER_NAME) && allBinders.size() == 1) { Map<String, Object> environment = new HashMap<>(); Map<String, Object> springMainPropertiesMap = getOrCreateSpringMainPropertiesMap(environment); configureSpringMainSources(springMainPropertiesMap); BinderProperties defaultKafkaBinder = new BinderProperties(); defaultKafkaBinder.setEnvironment(environment); Map<String, BinderProperties> binders = new HashMap<>(); binders.put(DEFAULT_KAFKA_BINDER_NAME, defaultKafkaBinder); bindingServiceProperties.setBinders(binders); } } } else { for (Map.Entry<String, BinderProperties> entry : bindingServiceProperties.getBinders().entrySet()) { if (entry.getKey() != null && entry.getValue() != null) { boolean isBinderTypeKafka = KAFKA_BINDER_TYPE.equalsIgnoreCase(entry.getValue().getType()); boolean isBinderNameKafka = DEFAULT_KAFKA_BINDER_NAME.equalsIgnoreCase(entry.getKey()); if (isBinderTypeKafka || isBinderNameKafka) { Map<String, Object> environment = entry.getValue().getEnvironment(); Map<String, Object> springMainPropertiesMap = getOrCreateSpringMainPropertiesMap(environment); configureSpringMainSources(springMainPropertiesMap); } } } } } return bean; }
class BindingServicePropertiesBeanPostProcessor implements BeanPostProcessor, ApplicationContextAware { static final String SPRING_MAIN_SOURCES_PROPERTY = "spring.main.sources"; static final String KAFKA_OAUTH2_SPRING_MAIN_SOURCES = String.join(",", AzureKafkaSpringCloudStreamConfiguration.class.getName(), AzureEventHubsKafkaOAuth2AutoConfiguration.class.getName()); private static final String DEFAULT_KAFKA_BINDER_NAME = "kafka"; private static final String KAFKA_BINDER_TYPE = "kafka"; private GenericApplicationContext applicationContext; @Override public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { this.applicationContext = (GenericApplicationContext) applicationContext; } @Override void configureSpringMainSources(Map<String, Object> springMainPropertiesMap) { String sources = KAFKA_OAUTH2_SPRING_MAIN_SOURCES; if (StringUtils.hasText((String) springMainPropertiesMap.get("sources"))) { sources += "," + springMainPropertiesMap.get("sources"); } springMainPropertiesMap.put("sources", sources); } @SuppressWarnings("unchecked") Map<String, Object> getOrCreateSpringMainPropertiesMap(Map<String, Object> map) { Map<String, Object> spring = (Map<String, Object>) map.computeIfAbsent("spring", k -> new LinkedHashMap<String, Object>()); return (Map<String, Object>) spring.computeIfAbsent("main", k -> new LinkedHashMap<String, Object>()); } }
class BindingServicePropertiesBeanPostProcessor implements BeanPostProcessor, ApplicationContextAware { static final String SPRING_MAIN_SOURCES_PROPERTY = "spring.main.sources"; static final String KAFKA_OAUTH2_SPRING_MAIN_SOURCES = String.join(",", AzureKafkaSpringCloudStreamConfiguration.class.getName(), AzureEventHubsKafkaOAuth2AutoConfiguration.class.getName()); private static final String DEFAULT_KAFKA_BINDER_NAME = "kafka"; private static final String KAFKA_BINDER_TYPE = "kafka"; private ApplicationContext applicationContext; @Override public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { this.applicationContext = applicationContext; } @Override void configureSpringMainSources(Map<String, Object> springMainPropertiesMap) { String sources = KAFKA_OAUTH2_SPRING_MAIN_SOURCES; if (StringUtils.hasText((String) springMainPropertiesMap.get("sources"))) { sources += "," + springMainPropertiesMap.get("sources"); } springMainPropertiesMap.put("sources", sources); } @SuppressWarnings("unchecked") Map<String, Object> getOrCreateSpringMainPropertiesMap(Map<String, Object> map) { Map<String, Object> spring = (Map<String, Object>) map.computeIfAbsent("spring", k -> new LinkedHashMap<String, Object>()); return (Map<String, Object>) spring.computeIfAbsent("main", k -> new LinkedHashMap<String, Object>()); } }
```suggestion // We need to add the Kafka configuration to the binder. So if Kafka could be the default binder, explicitly add the default binder, and add configurations. ```
/**
 * Injects the Azure Kafka OAuth2 configuration sources into every Kafka binder environment of a
 * {@code BindingServiceProperties} bean before it is initialized. Non-matching beans are returned
 * unchanged.
 *
 * @param bean the bean instance being post-processed
 * @param beanName the name of the bean
 * @return the (possibly modified) bean instance
 * @throws BeansException if post-processing fails
 */
public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException {
    if (bean instanceof BindingServiceProperties bindingServiceProperties) {
        if (bindingServiceProperties.getBinders().isEmpty()) {
            // No binder configured explicitly: Kafka can be the effective default only when it is
            // either the configured default binder (or none is configured) AND it is the sole
            // registered binder type. In that case, register an explicit "kafka" binder so there
            // is an environment to carry the OAuth2 sources.
            String defaultBinder = bindingServiceProperties.getDefaultBinder();
            if (!StringUtils.hasText(defaultBinder)
                || DEFAULT_KAFKA_BINDER_NAME.equalsIgnoreCase(defaultBinder)) {
                BinderTypeRegistry binderTypeRegistry = applicationContext.getBean(BinderTypeRegistry.class);
                Map<String, BinderType> allBinders = binderTypeRegistry.getAll();
                if (allBinders != null && allBinders.containsKey(DEFAULT_KAFKA_BINDER_NAME)
                    && allBinders.size() == 1) {
                    Map<String, Object> environment = new HashMap<>();
                    Map<String, Object> springMainPropertiesMap = getOrCreateSpringMainPropertiesMap(environment);
                    configureSpringMainSources(springMainPropertiesMap);
                    BinderProperties defaultKafkaBinder = new BinderProperties();
                    defaultKafkaBinder.setEnvironment(environment);
                    Map<String, BinderProperties> binders = new HashMap<>();
                    binders.put(DEFAULT_KAFKA_BINDER_NAME, defaultKafkaBinder);
                    bindingServiceProperties.setBinders(binders);
                }
            }
        } else {
            // Binders were configured explicitly: augment each one that is Kafka, matched either
            // by its declared type or by the conventional binder name "kafka".
            for (Map.Entry<String, BinderProperties> entry : bindingServiceProperties.getBinders().entrySet()) {
                if (entry.getKey() != null && entry.getValue() != null) {
                    boolean isBinderTypeKafka = KAFKA_BINDER_TYPE.equalsIgnoreCase(entry.getValue().getType());
                    boolean isBinderNameKafka = DEFAULT_KAFKA_BINDER_NAME.equalsIgnoreCase(entry.getKey());
                    if (isBinderTypeKafka || isBinderNameKafka) {
                        Map<String, Object> environment = entry.getValue().getEnvironment();
                        Map<String, Object> springMainPropertiesMap = getOrCreateSpringMainPropertiesMap(environment);
                        configureSpringMainSources(springMainPropertiesMap);
                    }
                }
            }
        }
    }
    return bean;
}
/**
 * Ensures the Azure Kafka OAuth2 configuration sources are present in the environment of every
 * Kafka binder declared on a {@code BindingServiceProperties} bean; other beans pass through
 * untouched.
 *
 * @param bean the bean instance being post-processed
 * @param beanName the name of the bean
 * @return the (possibly modified) bean instance
 * @throws BeansException if post-processing fails
 */
public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException {
    if (!(bean instanceof BindingServiceProperties bindingServiceProperties)) {
        return bean;
    }
    Map<String, BinderProperties> declaredBinders = bindingServiceProperties.getBinders();
    if (declaredBinders.isEmpty()) {
        // Nothing declared: Kafka is the effective default only when it is the configured
        // default binder (or none is configured) and the sole registered binder type.
        String configuredDefault = bindingServiceProperties.getDefaultBinder();
        boolean kafkaMayBeDefault = !StringUtils.hasText(configuredDefault)
            || DEFAULT_KAFKA_BINDER_NAME.equalsIgnoreCase(configuredDefault);
        if (kafkaMayBeDefault) {
            Map<String, BinderType> registeredBinderTypes =
                applicationContext.getBean(BinderTypeRegistry.class).getAll();
            boolean kafkaIsOnlyBinder = registeredBinderTypes != null
                && registeredBinderTypes.size() == 1
                && registeredBinderTypes.containsKey(DEFAULT_KAFKA_BINDER_NAME);
            if (kafkaIsOnlyBinder) {
                // Register an explicit "kafka" binder carrying the OAuth2 sources.
                Map<String, Object> binderEnvironment = new HashMap<>();
                configureSpringMainSources(getOrCreateSpringMainPropertiesMap(binderEnvironment));
                BinderProperties kafkaBinderProperties = new BinderProperties();
                kafkaBinderProperties.setEnvironment(binderEnvironment);
                Map<String, BinderProperties> binderRegistration = new HashMap<>();
                binderRegistration.put(DEFAULT_KAFKA_BINDER_NAME, kafkaBinderProperties);
                bindingServiceProperties.setBinders(binderRegistration);
            }
        }
    } else {
        // Augment every declared binder that is Kafka, matched by type or by name.
        for (Map.Entry<String, BinderProperties> binderEntry : declaredBinders.entrySet()) {
            String binderName = binderEntry.getKey();
            BinderProperties binderProperties = binderEntry.getValue();
            if (binderName == null || binderProperties == null) {
                continue;
            }
            if (KAFKA_BINDER_TYPE.equalsIgnoreCase(binderProperties.getType())
                || DEFAULT_KAFKA_BINDER_NAME.equalsIgnoreCase(binderName)) {
                configureSpringMainSources(
                    getOrCreateSpringMainPropertiesMap(binderProperties.getEnvironment()));
            }
        }
    }
    return bean;
}
class BindingServicePropertiesBeanPostProcessor implements BeanPostProcessor, ApplicationContextAware { static final String SPRING_MAIN_SOURCES_PROPERTY = "spring.main.sources"; static final String KAFKA_OAUTH2_SPRING_MAIN_SOURCES = String.join(",", AzureKafkaSpringCloudStreamConfiguration.class.getName(), AzureEventHubsKafkaOAuth2AutoConfiguration.class.getName()); private static final String DEFAULT_KAFKA_BINDER_NAME = "kafka"; private static final String KAFKA_BINDER_TYPE = "kafka"; private ApplicationContext applicationContext; @Override public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { this.applicationContext = applicationContext; } @Override void configureSpringMainSources(Map<String, Object> springMainPropertiesMap) { String sources = KAFKA_OAUTH2_SPRING_MAIN_SOURCES; if (StringUtils.hasText((String) springMainPropertiesMap.get("sources"))) { sources += "," + springMainPropertiesMap.get("sources"); } springMainPropertiesMap.put("sources", sources); } @SuppressWarnings("unchecked") Map<String, Object> getOrCreateSpringMainPropertiesMap(Map<String, Object> map) { Map<String, Object> spring = (Map<String, Object>) map.computeIfAbsent("spring", k -> new LinkedHashMap<String, Object>()); return (Map<String, Object>) spring.computeIfAbsent("main", k -> new LinkedHashMap<String, Object>()); } }
class BindingServicePropertiesBeanPostProcessor implements BeanPostProcessor, ApplicationContextAware { static final String SPRING_MAIN_SOURCES_PROPERTY = "spring.main.sources"; static final String KAFKA_OAUTH2_SPRING_MAIN_SOURCES = String.join(",", AzureKafkaSpringCloudStreamConfiguration.class.getName(), AzureEventHubsKafkaOAuth2AutoConfiguration.class.getName()); private static final String DEFAULT_KAFKA_BINDER_NAME = "kafka"; private static final String KAFKA_BINDER_TYPE = "kafka"; private ApplicationContext applicationContext; @Override public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { this.applicationContext = applicationContext; } @Override void configureSpringMainSources(Map<String, Object> springMainPropertiesMap) { String sources = KAFKA_OAUTH2_SPRING_MAIN_SOURCES; if (StringUtils.hasText((String) springMainPropertiesMap.get("sources"))) { sources += "," + springMainPropertiesMap.get("sources"); } springMainPropertiesMap.put("sources", sources); } @SuppressWarnings("unchecked") Map<String, Object> getOrCreateSpringMainPropertiesMap(Map<String, Object> map) { Map<String, Object> spring = (Map<String, Object>) map.computeIfAbsent("spring", k -> new LinkedHashMap<String, Object>()); return (Map<String, Object>) spring.computeIfAbsent("main", k -> new LinkedHashMap<String, Object>()); } }
I'm thinking we can then also set the default `authenticatedRegionDataLength` of 4 MB as part of the options class as well.
/**
 * Creates an {@link EncryptedBlobAsyncClient} from the options configured on this builder.
 *
 * @return an {@link EncryptedBlobAsyncClient} created from the configurations in this builder.
 * @throws NullPointerException If {@code blobName} is {@code null}.
 */
public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() {
    Objects.requireNonNull(blobName, "'blobName' cannot be null.");
    checkValidEncryptionParameters();

    // Implicit and explicit root container access are functionally equivalent, but explicit
    // references are easier to read and debug.
    if (CoreUtils.isNullOrEmpty(containerName)) {
        containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME;
    }

    BlobServiceVersion resolvedServiceVersion;
    if (version == null) {
        resolvedServiceVersion = BlobServiceVersion.getLatest();
    } else {
        resolvedServiceVersion = version;
    }

    // A zero region length means the value was never set on the builder; fall back to the default.
    int regionLength = this.gcmEncryptionRegionLength;
    if (regionLength == 0) {
        regionLength = GCM_ENCRYPTION_REGION_LENGTH;
    }

    return new EncryptedBlobAsyncClient(addBlobUserAgentModificationPolicy(getHttpPipeline()), endpoint,
        resolvedServiceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey,
        encryptionScope, keyWrapper, keyWrapAlgorithm, versionId, encryptionVersion, requiresEncryption,
        regionLength);
}
// Fall back to the library default when the builder's region length was never set
// (the int field defaults to 0, which is treated as the unset sentinel here).
int finalGcmEncryptionRegionLength = this.gcmEncryptionRegionLength == 0 ? GCM_ENCRYPTION_REGION_LENGTH : this.gcmEncryptionRegionLength;
/**
 * Creates an {@link EncryptedBlobAsyncClient} based on the options set in this builder.
 *
 * @return an {@link EncryptedBlobAsyncClient} created from the configurations in this builder.
 * @throws NullPointerException If {@code blobName} is {@code null}.
 */
public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() {
    Objects.requireNonNull(blobName, "'blobName' cannot be null.");
    checkValidEncryptionParameters();

    /*
    Implicit and explicit root container access are functionally equivalent, but explicit references are easier
    to read and debug.
    */
    if (CoreUtils.isNullOrEmpty(containerName)) {
        containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME;
    }

    BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest();

    // Resolve the encryption options into a local instead of reassigning the builder field:
    // building a client should not mutate the builder's state, so the builder can keep being
    // configured and reused after this call.
    BlobClientSideEncryptionOptions resolvedEncryptionOptions = this.clientSideEncryptionOptions == null
        ? new BlobClientSideEncryptionOptions() : this.clientSideEncryptionOptions;

    return new EncryptedBlobAsyncClient(addBlobUserAgentModificationPolicy(getHttpPipeline()), endpoint,
        serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope,
        keyWrapper, keyWrapAlgorithm, versionId, encryptionVersion, requiresEncryption, resolvedEncryptionOptions);
}
class EncryptedBlobClientBuilder implements TokenCredentialTrait<EncryptedBlobClientBuilder>, ConnectionStringTrait<EncryptedBlobClientBuilder>, AzureNamedKeyCredentialTrait<EncryptedBlobClientBuilder>, AzureSasCredentialTrait<EncryptedBlobClientBuilder>, HttpTrait<EncryptedBlobClientBuilder>, ConfigurationTrait<EncryptedBlobClientBuilder>, EndpointTrait<EncryptedBlobClientBuilder> { private static final ClientLogger LOGGER = new ClientLogger(EncryptedBlobClientBuilder.class); private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-storage-blob-cryptography.properties"); private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final String CLIENT_NAME = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName"); private static final String CLIENT_VERSION = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion"); private static final String BLOB_CLIENT_NAME = USER_AGENT_PROPERTIES.getOrDefault(SDK_NAME, "UnknownName"); private static final String BLOB_CLIENT_VERSION = USER_AGENT_PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion"); private static final String USER_AGENT_MODIFICATION_REGEX = "(.*? 
)?(azsdk-java-azure-storage-blob/12\\.\\d{1,2}\\.\\d{1,2}(?:-beta\\.\\d{1,2})?)( .*?)?"; private String endpoint; private String accountName; private String containerName; private String blobName; private String snapshot; private String versionId; private boolean requiresEncryption; private final EncryptionVersion encryptionVersion; private StorageSharedKeyCredential storageSharedKeyCredential; private TokenCredential tokenCredential; private AzureSasCredential azureSasCredential; private String sasToken; private HttpClient httpClient; private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private HttpLogOptions logOptions; private RequestRetryOptions retryOptions; private RetryOptions coreRetryOptions; private HttpPipeline httpPipeline; private ClientOptions clientOptions = new ClientOptions(); private Configuration configuration; private AsyncKeyEncryptionKey keyWrapper; private AsyncKeyEncryptionKeyResolver keyResolver; private String keyWrapAlgorithm; private BlobServiceVersion version; private CpkInfo customerProvidedKey; private EncryptionScope encryptionScope; private int gcmEncryptionRegionLength; /** * Creates a new instance of the EncryptedBlobClientBuilder * @deprecated Use {@link EncryptedBlobClientBuilder */ @Deprecated public EncryptedBlobClientBuilder() { logOptions = getDefaultHttpLogOptions(); this.encryptionVersion = EncryptionVersion.V1; LOGGER.warning("Client is being configured to use v1 of client side encryption, " + "which is no longer considered secure. The default is v1 for compatibility reasons, but it is highly" + "recommended the version be set to v2 using the constructor"); } /** * Creates a new instance of the EncryptedBlobClientbuilder. * * @param version The version of the client side encryption protocol to use. 
It is highly recommended that v2 be * preferred for security reasons, though v1 continues to be supported for compatibility reasons. Note that even a * client configured to encrypt using v2 can decrypt blobs that use the v1 protocol. */ @SuppressWarnings("deprecation") public EncryptedBlobClientBuilder(EncryptionVersion version) { Objects.requireNonNull(version); logOptions = getDefaultHttpLogOptions(); this.encryptionVersion = version; if (EncryptionVersion.V1.equals(this.encryptionVersion)) { LOGGER.warning("Client is being configured to use v1 of client side encryption, " + "which is no longer considered secure. The default is v1 for compatibility reasons, but it is highly" + "recommended the version be set to v2 using the constructor"); } } /** * Creates a {@link EncryptedBlobClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient --> * <pre> * EncryptedBlobAsyncClient client = new EncryptedBlobClientBuilder& * .key& * .keyResolver& * .connectionString& * .containerName& * .blobName& * .buildEncryptedBlobAsyncClient& * </pre> * <!-- end com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient --> * * @return a {@link EncryptedBlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. * @throws IllegalStateException If multiple credentials have been specified. * @throws IllegalStateException If both {@link * and {@link */ public EncryptedBlobClient buildEncryptedBlobClient() { return new EncryptedBlobClient(buildEncryptedBlobAsyncClient()); } /** * Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient --> * <pre> * EncryptedBlobClient client = new EncryptedBlobClientBuilder& * .key& * .keyResolver& * .connectionString& * .containerName& * .blobName& * .buildEncryptedBlobClient& * </pre> * <!-- end com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient --> * * @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. * @throws IllegalStateException If multiple credentials have been specified. * @throws IllegalStateException If both {@link * and {@link */ private HttpPipeline addBlobUserAgentModificationPolicy(HttpPipeline pipeline) { List<HttpPipelinePolicy> policies = new ArrayList<>(); for (int i = 0; i < pipeline.getPolicyCount(); i++) { HttpPipelinePolicy currPolicy = pipeline.getPolicy(i); policies.add(currPolicy); if (currPolicy instanceof UserAgentPolicy) { policies.add(new BlobUserAgentModificationPolicy(CLIENT_NAME, CLIENT_VERSION)); } } return new HttpPipelineBuilder() .httpClient(pipeline.getHttpClient()) .policies(policies.toArray(new HttpPipelinePolicy[0])) .tracer(pipeline.getTracer()) .build(); } private String modifyUserAgentString(String applicationId, Configuration userAgentConfiguration) { Pattern pattern = Pattern.compile(USER_AGENT_MODIFICATION_REGEX); String userAgent = UserAgentUtil.toUserAgentString(applicationId, BLOB_CLIENT_NAME, BLOB_CLIENT_VERSION, userAgentConfiguration); Matcher matcher = pattern.matcher(userAgent); String version = encryptionVersion == EncryptionVersion.V2 ? "2.0" : "1.0"; String stringToAppend = "azstorage-clientsideencryption/" + version; if (matcher.matches() && !userAgent.contains(stringToAppend)) { String segment1 = matcher.group(1) == null ? 
"" : matcher.group(1); String segment2 = matcher.group(2) == null ? "" : matcher.group(2); String segment3 = matcher.group(3) == null ? "" : matcher.group(3); userAgent = segment1 + stringToAppend + " " + segment2 + segment3; } return userAgent; } private HttpPipeline getHttpPipeline() { CredentialValidator.validateSingleCredentialIsPresent( storageSharedKeyCredential, tokenCredential, azureSasCredential, sasToken, LOGGER); if (httpPipeline != null) { List<HttpPipelinePolicy> policies = new ArrayList<>(); boolean decryptionPolicyPresent = false; for (int i = 0; i < httpPipeline.getPolicyCount(); i++) { HttpPipelinePolicy currPolicy = httpPipeline.getPolicy(i); if (currPolicy instanceof BlobDecryptionPolicy) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The passed pipeline was already" + " configured for encryption/decryption in a way that might conflict with the passed key " + "information. Please ensure that the passed pipeline is not already configured for " + "encryption/decryption")); } policies.add(currPolicy); } policies.add(0, new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption)); return new HttpPipelineBuilder() .httpClient(httpPipeline.getHttpClient()) .tracer(httpPipeline.getTracer()) .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } Configuration userAgentConfiguration = (configuration == null) ? 
Configuration.NONE : configuration; List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption)); String applicationId = CoreUtils.getApplicationId(clientOptions, logOptions); String modifiedUserAgent = modifyUserAgentString(applicationId, userAgentConfiguration); policies.add(new UserAgentPolicy(modifiedUserAgent)); policies.add(new RequestIdPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(BuilderUtils.createRetryPolicy(retryOptions, coreRetryOptions, LOGGER)); policies.add(new AddDatePolicy()); HttpHeaders headers = CoreUtils.createHttpHeadersFromClientOptions(clientOptions); if (headers != null) { policies.add(new AddHeadersPolicy(headers)); } policies.add(new MetadataValidationPolicy()); if (storageSharedKeyCredential != null) { policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential)); } else if (tokenCredential != null) { BuilderHelper.httpsValidation(tokenCredential, "bearer token", endpoint, LOGGER); policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, Constants.STORAGE_SCOPE)); } else if (azureSasCredential != null) { policies.add(new AzureSasCredentialPolicy(azureSasCredential, false)); } else if (sasToken != null) { policies.add(new AzureSasCredentialPolicy(new AzureSasCredential(sasToken), false)); } policies.addAll(perRetryPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new ResponseValidationPolicyBuilder() .addOptionalEcho(HttpHeaderName.X_MS_CLIENT_REQUEST_ID) .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256_HEADER_NAME) .build()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .tracer(createTracer(clientOptions)) .build(); } /** * Sets the encryption key parameters for the client * * 
@param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption key * @param keyWrapAlgorithm The {@link String} used to wrap the key. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) { this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; return this; } /** * Sets the encryption parameters for this client * * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) { this.keyResolver = keyResolver; return this; } private void checkValidEncryptionParameters() { if (this.keyWrapper == null && this.keyResolver == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null")); } if (this.keyWrapper != null && this.keyWrapAlgorithm == null) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key.")); } } /** * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. * * @param credential {@link StorageSharedKeyCredential}. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) { this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.tokenCredential = null; this.sasToken = null; return this; } /** * Sets the {@link AzureNamedKeyCredential} used to authorize requests sent to the service. * * @param credential {@link AzureNamedKeyCredential}. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. 
*/ @Override public EncryptedBlobClientBuilder credential(AzureNamedKeyCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); return credential(StorageSharedKeyCredential.fromAzureNamedKeyCredential(credential)); } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java * <a href="https: * documentation for more details on proper usage of the {@link TokenCredential} type. * * @param credential {@link TokenCredential} used to authorize requests sent to the service. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ @Override public EncryptedBlobClientBuilder credential(TokenCredential credential) { this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.storageSharedKeyCredential = null; this.sasToken = null; return this; } /** * Sets the SAS token used to authorize requests sent to the service. * * @param sasToken The SAS token to use for authenticating requests. This string should only be the query parameters * (with or without a leading '?') and not a full url. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code sasToken} is {@code null}. */ public EncryptedBlobClientBuilder sasToken(String sasToken) { this.sasToken = Objects.requireNonNull(sasToken, "'sasToken' cannot be null."); this.storageSharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Sets the {@link AzureSasCredential} used to authorize requests sent to the service. * * @param credential {@link AzureSasCredential} used to authorize requests sent to the service. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. 
*/ @Override public EncryptedBlobClientBuilder credential(AzureSasCredential credential) { this.azureSasCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); return this; } /** * Clears the credential used to authorize the request. * * <p>This is for blobs that are publicly accessible.</p> * * @return the updated EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder setAnonymousAccess() { this.storageSharedKeyCredential = null; this.tokenCredential = null; this.azureSasCredential = null; this.sasToken = null; return this; } /** * Sets the connection string to connect to the service. * * @param connectionString Connection string of the storage account. * @return the updated EncryptedBlobClientBuilder * @throws IllegalArgumentException If {@code connectionString} is invalid. */ @Override public EncryptedBlobClientBuilder connectionString(String connectionString) { StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, LOGGER); StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint(); if (endpoint == null || endpoint.getPrimaryUri() == null) { throw LOGGER .logExceptionAsError(new IllegalArgumentException( "connectionString missing required settings to derive blob service endpoint.")); } this.endpoint(endpoint.getPrimaryUri()); if (storageConnectionString.getAccountName() != null) { this.accountName = storageConnectionString.getAccountName(); } StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings(); if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) { this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(), authSettings.getAccount().getAccessKey())); } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) { this.sasToken(authSettings.getSasToken()); } return this; } /** * Sets the service endpoint, additionally parses it for information (SAS token, 
container name, blob name) * * <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob name * as the container name. With only one path element, it is impossible to distinguish between a container name and a * blob in the root container, so it is assumed to be the container name as this is much more common. When working * with blobs in the root container, it is best to set the endpoint to the account url and specify the blob name * separately using the {@link EncryptedBlobClientBuilder * * @param endpoint URL of the service * @return the updated EncryptedBlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. */ @Override public EncryptedBlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobUrlParts parts = BlobUrlParts.parse(url); this.accountName = parts.getAccountName(); this.endpoint = BuilderHelper.getEndpoint(parts); this.containerName = parts.getBlobContainerName() == null ? this.containerName : parts.getBlobContainerName(); this.blobName = parts.getBlobName() == null ? this.blobName : parts.getBlobName(); this.snapshot = parts.getSnapshot(); this.versionId = parts.getVersionId(); String sasToken = parts.getCommonSasQueryParameters().encode(); if (!CoreUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.", ex)); } return this; } /** * Sets the name of the container that contains the blob. * * @param containerName Name of the container. If the value {@code null} or empty the root container, {@code $root}, * will be used. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob. 
* * @param blobName Name of the blob. If the blob name contains special characters, pass in the url encoded version * of the blob name. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public EncryptedBlobClientBuilder blobName(String blobName) { this.blobName = Objects.requireNonNull(blobName, "'blobName' cannot be null."); return this; } /** * Sets the snapshot identifier of the blob. * * @param snapshot Snapshot identifier for the blob. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Sets the version identifier of the blob. * * @param versionId Version identifier for the blob, pass {@code null} to interact with the latest blob version. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder versionId(String versionId) { this.versionId = versionId; return this; } /** * Sets the {@link HttpClient} to use for sending and receiving requests to and from the service. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param httpClient The {@link HttpClient} to use for requests. 
* @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { LOGGER.info("'httpClient' is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; } /** * Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param pipelinePolicy A {@link HttpPipelinePolicy pipeline policy}. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. */ @Override public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null"); if (pipelinePolicy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) { perCallPolicies.add(pipelinePolicy); } else { perRetryPolicies.add(pipelinePolicy); } return this; } /** * Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from * the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. 
In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param logOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests to * and from the service. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code logOptions} is {@code null}. */ @Override public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); return this; } /** * Gets the default Storage allowlist log headers and query parameters. * * @return the default http log options. */ public static HttpLogOptions getDefaultHttpLogOptions() { return BuilderHelper.getDefaultHttpLogOptions(); } /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * <p> * Setting this is mutually exclusive with using {@link * * @param retryOptions {@link RequestRetryOptions}. * @return the updated EncryptedBlobClientBuilder object. 
*/ public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** * Sets the {@link RetryOptions} for all the requests made through the client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * Setting this is mutually exclusive with using {@link * Consider using {@link * * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client. * @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder retryOptions(RetryOptions retryOptions) { this.coreRetryOptions = retryOptions; return this; } /** * Sets the {@link HttpPipeline} to use for the service client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. 
Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * The {@link * {@link * not ignored when {@code pipeline} is set. * * @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { LOGGER.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is * recommended that this method be called with an instance of the {@link HttpClientOptions} * class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more * configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait * interface. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param clientOptions A configured instance of {@link HttpClientOptions}. 
* @see HttpClientOptions * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code clientOptions} is {@code null}. */ @Override public EncryptedBlobClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null."); return this; } /** * Sets the {@link BlobServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version of the client library will have the result of potentially moving to a newer service version. * <p> * Targeting a specific service version may also mean that the service will return an error for newer APIs. * * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) { this.version = version; return this; } /** * Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the server. * * @param customerProvidedKey {@link CustomerProvidedKey} * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) { if (customerProvidedKey == null) { this.customerProvidedKey = null; } else { this.customerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return this; } /** * Sets the {@code encryption scope} that is used to encrypt blob contents on the server. * * @param encryptionScope Encryption scope containing the encryption key information. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder encryptionScope(String encryptionScope) { if (encryptionScope == null) { this.encryptionScope = null; } else { this.encryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope); } return this; } /** * Configures the builder based on the passed {@link BlobClient}. This will set the {@link HttpPipeline}, * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying * pipeline should not already be configured for encryption/decryption. * * <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link * {@link * * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and * encryption scope properties from the provided client. To set CPK, please use * {@link * * @param blobClient BlobClient used to configure the builder. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code containerClient} is {@code null}. */ public EncryptedBlobClientBuilder blobClient(BlobClient blobClient) { Objects.requireNonNull(blobClient); return client(blobClient.getHttpPipeline(), blobClient.getBlobUrl(), blobClient.getServiceVersion()); } /** * Configures the builder based on the passed {@link BlobAsyncClient}. This will set the {@link HttpPipeline}, * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying * pipeline should not already be configured for encryption/decryption. * * <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link * {@link * * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and * encryption scope properties from the provided client. To set CPK, please use * {@link * * @param blobAsyncClient BlobAsyncClient used to configure the builder. 
* @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code containerClient} is {@code null}. */ public EncryptedBlobClientBuilder blobAsyncClient(BlobAsyncClient blobAsyncClient) { Objects.requireNonNull(blobAsyncClient); return client(blobAsyncClient.getHttpPipeline(), blobAsyncClient.getBlobUrl(), blobAsyncClient.getServiceVersion()); } /** * Helper method to transform a regular client into an encrypted client * * @param httpPipeline {@link HttpPipeline} * @param endpoint The endpoint. * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ private EncryptedBlobClientBuilder client(HttpPipeline httpPipeline, String endpoint, BlobServiceVersion version) { this.endpoint(endpoint); this.serviceVersion(version); return this.pipeline(httpPipeline); } /** * Sets the requires encryption option. * * @param requiresEncryption Whether encryption is enforced by this client. Client will throw if data is * downloaded and it is not encrypted. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder requiresEncryption(boolean requiresEncryption) { this.requiresEncryption = requiresEncryption; return this; } /** * Sets the partition chunk size * @param gcmEncryptionRegionLength the region length used for encrypting the blob * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder gcmEncryptionRegionLength(int gcmEncryptionRegionLength) { if (gcmEncryptionRegionLength <= 0) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("Region length needs to be greater than 0.")); } this.gcmEncryptionRegionLength = gcmEncryptionRegionLength; return this; } }
class EncryptedBlobClientBuilder implements TokenCredentialTrait<EncryptedBlobClientBuilder>, ConnectionStringTrait<EncryptedBlobClientBuilder>, AzureNamedKeyCredentialTrait<EncryptedBlobClientBuilder>, AzureSasCredentialTrait<EncryptedBlobClientBuilder>, HttpTrait<EncryptedBlobClientBuilder>, ConfigurationTrait<EncryptedBlobClientBuilder>, EndpointTrait<EncryptedBlobClientBuilder> { private static final ClientLogger LOGGER = new ClientLogger(EncryptedBlobClientBuilder.class); private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-storage-blob-cryptography.properties"); private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final String CLIENT_NAME = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName"); private static final String CLIENT_VERSION = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion"); private static final String BLOB_CLIENT_NAME = USER_AGENT_PROPERTIES.getOrDefault(SDK_NAME, "UnknownName"); private static final String BLOB_CLIENT_VERSION = USER_AGENT_PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion"); private static final String USER_AGENT_MODIFICATION_REGEX = "(.*? 
)?(azsdk-java-azure-storage-blob/12\\.\\d{1,2}\\.\\d{1,2}(?:-beta\\.\\d{1,2})?)( .*?)?"; private String endpoint; private String accountName; private String containerName; private String blobName; private String snapshot; private String versionId; private boolean requiresEncryption; private final EncryptionVersion encryptionVersion; private StorageSharedKeyCredential storageSharedKeyCredential; private TokenCredential tokenCredential; private AzureSasCredential azureSasCredential; private String sasToken; private HttpClient httpClient; private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private HttpLogOptions logOptions; private RequestRetryOptions retryOptions; private RetryOptions coreRetryOptions; private HttpPipeline httpPipeline; private ClientOptions clientOptions = new ClientOptions(); private Configuration configuration; private AsyncKeyEncryptionKey keyWrapper; private AsyncKeyEncryptionKeyResolver keyResolver; private String keyWrapAlgorithm; private BlobServiceVersion version; private CpkInfo customerProvidedKey; private EncryptionScope encryptionScope; private BlobClientSideEncryptionOptions clientSideEncryptionOptions; /** * Creates a new instance of the EncryptedBlobClientBuilder * @deprecated Use {@link EncryptedBlobClientBuilder */ @Deprecated public EncryptedBlobClientBuilder() { logOptions = getDefaultHttpLogOptions(); this.encryptionVersion = EncryptionVersion.V1; LOGGER.warning("Client is being configured to use v1 of client side encryption, " + "which is no longer considered secure. The default is v1 for compatibility reasons, but it is highly" + "recommended the version be set to v2 using the constructor"); } /** * Creates a new instance of the EncryptedBlobClientbuilder. * * @param version The version of the client side encryption protocol to use. 
It is highly recommended that v2 be * preferred for security reasons, though v1 continues to be supported for compatibility reasons. Note that even a * client configured to encrypt using v2 can decrypt blobs that use the v1 protocol. */ @SuppressWarnings("deprecation") public EncryptedBlobClientBuilder(EncryptionVersion version) { Objects.requireNonNull(version); logOptions = getDefaultHttpLogOptions(); this.encryptionVersion = version; if (EncryptionVersion.V1.equals(this.encryptionVersion)) { LOGGER.warning("Client is being configured to use v1 of client side encryption, " + "which is no longer considered secure. The default is v1 for compatibility reasons, but it is highly" + "recommended the version be set to v2 using the constructor"); } } /** * Creates a {@link EncryptedBlobClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient --> * <pre> * EncryptedBlobAsyncClient client = new EncryptedBlobClientBuilder& * .key& * .keyResolver& * .connectionString& * .containerName& * .blobName& * .buildEncryptedBlobAsyncClient& * </pre> * <!-- end com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient --> * * @return a {@link EncryptedBlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. * @throws IllegalStateException If multiple credentials have been specified. * @throws IllegalStateException If both {@link * and {@link */ public EncryptedBlobClient buildEncryptedBlobClient() { return new EncryptedBlobClient(buildEncryptedBlobAsyncClient()); } /** * Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient --> * <pre> * EncryptedBlobClient client = new EncryptedBlobClientBuilder& * .key& * .keyResolver& * .connectionString& * .containerName& * .blobName& * .buildEncryptedBlobClient& * </pre> * <!-- end com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient --> * * @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. * @throws IllegalStateException If multiple credentials have been specified. * @throws IllegalStateException If both {@link * and {@link */ private HttpPipeline addBlobUserAgentModificationPolicy(HttpPipeline pipeline) { List<HttpPipelinePolicy> policies = new ArrayList<>(); for (int i = 0; i < pipeline.getPolicyCount(); i++) { HttpPipelinePolicy currPolicy = pipeline.getPolicy(i); policies.add(currPolicy); if (currPolicy instanceof UserAgentPolicy) { policies.add(new BlobUserAgentModificationPolicy(CLIENT_NAME, CLIENT_VERSION)); } } return new HttpPipelineBuilder() .httpClient(pipeline.getHttpClient()) .policies(policies.toArray(new HttpPipelinePolicy[0])) .tracer(pipeline.getTracer()) .build(); } private String modifyUserAgentString(String applicationId, Configuration userAgentConfiguration) { Pattern pattern = Pattern.compile(USER_AGENT_MODIFICATION_REGEX); String userAgent = UserAgentUtil.toUserAgentString(applicationId, BLOB_CLIENT_NAME, BLOB_CLIENT_VERSION, userAgentConfiguration); Matcher matcher = pattern.matcher(userAgent); String version = encryptionVersion == EncryptionVersion.V2 ? "2.0" : "1.0"; String stringToAppend = "azstorage-clientsideencryption/" + version; if (matcher.matches() && !userAgent.contains(stringToAppend)) { String segment1 = matcher.group(1) == null ? 
"" : matcher.group(1); String segment2 = matcher.group(2) == null ? "" : matcher.group(2); String segment3 = matcher.group(3) == null ? "" : matcher.group(3); userAgent = segment1 + stringToAppend + " " + segment2 + segment3; } return userAgent; } private HttpPipeline getHttpPipeline() { CredentialValidator.validateSingleCredentialIsPresent( storageSharedKeyCredential, tokenCredential, azureSasCredential, sasToken, LOGGER); if (httpPipeline != null) { List<HttpPipelinePolicy> policies = new ArrayList<>(); boolean decryptionPolicyPresent = false; for (int i = 0; i < httpPipeline.getPolicyCount(); i++) { HttpPipelinePolicy currPolicy = httpPipeline.getPolicy(i); if (currPolicy instanceof BlobDecryptionPolicy) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The passed pipeline was already" + " configured for encryption/decryption in a way that might conflict with the passed key " + "information. Please ensure that the passed pipeline is not already configured for " + "encryption/decryption")); } policies.add(currPolicy); } policies.add(0, new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption)); return new HttpPipelineBuilder() .httpClient(httpPipeline.getHttpClient()) .tracer(httpPipeline.getTracer()) .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } Configuration userAgentConfiguration = (configuration == null) ? 
Configuration.NONE : configuration; List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption)); String applicationId = CoreUtils.getApplicationId(clientOptions, logOptions); String modifiedUserAgent = modifyUserAgentString(applicationId, userAgentConfiguration); policies.add(new UserAgentPolicy(modifiedUserAgent)); policies.add(new RequestIdPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(BuilderUtils.createRetryPolicy(retryOptions, coreRetryOptions, LOGGER)); policies.add(new AddDatePolicy()); HttpHeaders headers = CoreUtils.createHttpHeadersFromClientOptions(clientOptions); if (headers != null) { policies.add(new AddHeadersPolicy(headers)); } policies.add(new MetadataValidationPolicy()); if (storageSharedKeyCredential != null) { policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential)); } else if (tokenCredential != null) { BuilderHelper.httpsValidation(tokenCredential, "bearer token", endpoint, LOGGER); policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, Constants.STORAGE_SCOPE)); } else if (azureSasCredential != null) { policies.add(new AzureSasCredentialPolicy(azureSasCredential, false)); } else if (sasToken != null) { policies.add(new AzureSasCredentialPolicy(new AzureSasCredential(sasToken), false)); } policies.addAll(perRetryPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new ResponseValidationPolicyBuilder() .addOptionalEcho(HttpHeaderName.X_MS_CLIENT_REQUEST_ID) .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256_HEADER_NAME) .build()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .tracer(createTracer(clientOptions)) .build(); } /** * Sets the encryption key parameters for the client * * 
     * @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption key
     * @param keyWrapAlgorithm The {@link String} used to wrap the key.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) {
        this.keyWrapper = key;
        this.keyWrapAlgorithm = keyWrapAlgorithm;
        return this;
    }

    /**
     * Sets the encryption parameters for this client.
     *
     * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) {
        this.keyResolver = keyResolver;
        return this;
    }

    // Validates that a usable key configuration is present: at least one of key/keyResolver, and a wrap
    // algorithm whenever a key is supplied.
    private void checkValidEncryptionParameters() {
        if (this.keyWrapper == null && this.keyResolver == null) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null"));
        }
        if (this.keyWrapper != null && this.keyWrapAlgorithm == null) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key."));
        }
    }

    /**
     * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service.
     * Clears any previously set token credential or SAS token.
     *
     * @param credential {@link StorageSharedKeyCredential}.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) {
        this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
        this.tokenCredential = null;
        this.sasToken = null;
        return this;
    }

    /**
     * Sets the {@link AzureNamedKeyCredential} used to authorize requests sent to the service.
     * Converted internally to a {@link StorageSharedKeyCredential}.
     *
     * @param credential {@link AzureNamedKeyCredential}.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder credential(AzureNamedKeyCredential credential) {
        Objects.requireNonNull(credential, "'credential' cannot be null.");
        return credential(StorageSharedKeyCredential.fromAzureNamedKeyCredential(credential));
    }

    /**
     * Sets the {@link TokenCredential} used to authorize requests sent to the service.
     * Clears any previously set shared key credential or SAS token.
     *
     * @param credential {@link TokenCredential} used to authorize requests sent to the service.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder credential(TokenCredential credential) {
        this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
        this.storageSharedKeyCredential = null;
        this.sasToken = null;
        return this;
    }

    /**
     * Sets the SAS token used to authorize requests sent to the service.
     * Clears any previously set shared key or token credential.
     *
     * @param sasToken The SAS token to use for authenticating requests. This string should only be the query parameters
     * (with or without a leading '?') and not a full url.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code sasToken} is {@code null}.
     */
    public EncryptedBlobClientBuilder sasToken(String sasToken) {
        this.sasToken = Objects.requireNonNull(sasToken, "'sasToken' cannot be null.");
        this.storageSharedKeyCredential = null;
        this.tokenCredential = null;
        return this;
    }

    /**
     * Sets the {@link AzureSasCredential} used to authorize requests sent to the service.
     *
     * @param credential {@link AzureSasCredential} used to authorize requests sent to the service.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder credential(AzureSasCredential credential) {
        this.azureSasCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
        return this;
    }

    /**
     * Clears the credential used to authorize the request.
     *
     * <p>This is for blobs that are publicly accessible.</p>
     *
     * @return the updated EncryptedBlobClientBuilder
     */
    public EncryptedBlobClientBuilder setAnonymousAccess() {
        this.storageSharedKeyCredential = null;
        this.tokenCredential = null;
        this.azureSasCredential = null;
        this.sasToken = null;
        return this;
    }

    /**
     * Sets the connection string to connect to the service. Derives the endpoint, account name and, when present,
     * a shared key credential or SAS token from the connection string.
     *
     * @param connectionString Connection string of the storage account.
     * @return the updated EncryptedBlobClientBuilder
     * @throws IllegalArgumentException If {@code connectionString} is invalid.
     */
    @Override
    public EncryptedBlobClientBuilder connectionString(String connectionString) {
        StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, LOGGER);
        StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint();
        if (endpoint == null || endpoint.getPrimaryUri() == null) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                "connectionString missing required settings to derive blob service endpoint."));
        }
        this.endpoint(endpoint.getPrimaryUri());
        if (storageConnectionString.getAccountName() != null) {
            this.accountName = storageConnectionString.getAccountName();
        }
        // Pick the credential mechanism embedded in the connection string, if any.
        StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings();
        if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) {
            this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(),
                authSettings.getAccount().getAccessKey()));
        } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) {
            this.sasToken(authSettings.getSasToken());
        }
        return this;
    }

    /**
     * Sets the service endpoint, additionally parsing it for information (SAS token, container name, blob name).
     *
     * <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob name
     * as the container name. When working with blobs in the root container, set the endpoint to the account url and
     * specify the blob name separately using {@link #blobName(String)}.</p>
     *
     * @param endpoint URL of the service
     * @return the updated EncryptedBlobClientBuilder object
     * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL.
     */
    @Override
    public EncryptedBlobClientBuilder endpoint(String endpoint) {
        try {
            URL url = new URL(endpoint);
            BlobUrlParts parts = BlobUrlParts.parse(url);
            this.accountName = parts.getAccountName();
            this.endpoint = BuilderHelper.getEndpoint(parts);
            // Only override container/blob names if the URL actually carried them.
            this.containerName
                = parts.getBlobContainerName() == null ? this.containerName : parts.getBlobContainerName();
            this.blobName = parts.getBlobName() == null ? this.blobName : parts.getBlobName();
            this.snapshot = parts.getSnapshot();
            this.versionId = parts.getVersionId();
            // A SAS token embedded in the URL becomes this builder's credential.
            String sasToken = parts.getCommonSasQueryParameters().encode();
            if (!CoreUtils.isNullOrEmpty(sasToken)) {
                this.sasToken(sasToken);
            }
        } catch (MalformedURLException ex) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.", ex));
        }
        return this;
    }

    /**
     * Sets the name of the container that contains the blob.
     *
     * @param containerName Name of the container. If the value {@code null} or empty the root container, {@code $root},
     * will be used.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder containerName(String containerName) {
        this.containerName = containerName;
        return this;
    }

    /**
     * Sets the name of the blob.
     *
     * @param blobName Name of the blob. If the blob name contains special characters, pass in the url encoded version
     * of the blob name.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code blobName} is {@code null}
     */
    public EncryptedBlobClientBuilder blobName(String blobName) {
        this.blobName = Objects.requireNonNull(blobName, "'blobName' cannot be null.");
        return this;
    }

    /**
     * Sets the snapshot identifier of the blob.
     *
     * @param snapshot Snapshot identifier for the blob.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder snapshot(String snapshot) {
        this.snapshot = snapshot;
        return this;
    }

    /**
     * Sets the version identifier of the blob.
     *
     * @param versionId Version identifier for the blob, pass {@code null} to interact with the latest blob version.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder versionId(String versionId) {
        this.versionId = versionId;
        return this;
    }

    /**
     * Sets the {@link HttpClient} to use for sending and receiving requests. Ignored when an explicit
     * {@link HttpPipeline} is supplied via {@link #pipeline(HttpPipeline)}.
     *
     * @param httpClient The {@link HttpClient} to use for requests.
     * @return the updated EncryptedBlobClientBuilder object
     */
    @Override
    public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) {
        // Clearing a previously configured client is legal but logged for visibility.
        if (this.httpClient != null && httpClient == null) {
            LOGGER.info("'httpClient' is being set to 'null' when it was previously configured.");
        }
        this.httpClient = httpClient;
        return this;
    }

    /**
     * Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent. Ignored when an explicit
     * {@link HttpPipeline} is supplied via {@link #pipeline(HttpPipeline)}.
     *
     * @param pipelinePolicy A {@link HttpPipelinePolicy pipeline policy}.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code pipelinePolicy} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) {
        Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null");
        // Per-call policies run once per request; everything else runs on each retry attempt.
        if (pipelinePolicy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) {
            perCallPolicies.add(pipelinePolicy);
        } else {
            perRetryPolicies.add(pipelinePolicy);
        }
        return this;
    }

    /**
     * Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from
     * the service. Ignored when an explicit {@link HttpPipeline} is supplied via {@link #pipeline(HttpPipeline)}.
     *
     * @param logOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests to
     * and from the service.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code logOptions} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) {
        this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
        return this;
    }

    /**
     * Gets the default Storage allowlist log headers and query parameters.
     *
     * @return the default http log options.
     */
    public static HttpLogOptions getDefaultHttpLogOptions() {
        return BuilderHelper.getDefaultHttpLogOptions();
    }

    /**
     * Sets the configuration object used to retrieve environment configuration values during building of the client.
     *
     * @param configuration Configuration store used to retrieve environment configurations.
     * @return the updated EncryptedBlobClientBuilder object
     */
    @Override
    public EncryptedBlobClientBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }

    /**
     * Sets the Storage-specific request retry options for all the requests made through the client.
     * Mutually exclusive with {@link #retryOptions(RetryOptions)}.
     *
     * @param retryOptions {@link RequestRetryOptions}.
     * @return the updated EncryptedBlobClientBuilder object.
     */
    public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) {
        this.retryOptions = retryOptions;
        return this;
    }

    /**
     * Sets the core {@link RetryOptions} for all the requests made through the client. Mutually exclusive with
     * {@link #retryOptions(RequestRetryOptions)}; ignored when an explicit {@link HttpPipeline} is supplied.
     *
     * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
     * @return the updated EncryptedBlobClientBuilder object
     */
    @Override
    public EncryptedBlobClientBuilder retryOptions(RetryOptions retryOptions) {
        this.coreRetryOptions = retryOptions;
        return this;
    }

    /**
     * Sets the {@link HttpPipeline} to use for the service client. When set, it takes precedence over the other
     * HttpTrait settings on this builder.
     *
     * @param httpPipeline The {@link HttpPipeline} to use.
     * @return the updated EncryptedBlobClientBuilder object
     */
    @Override
    public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) {
        // Clearing a previously configured pipeline is legal but logged for visibility.
        if (this.httpPipeline != null && httpPipeline == null) {
            LOGGER.info("HttpPipeline is being set to 'null' when it was previously configured.");
        }
        this.httpPipeline = httpPipeline;
        return this;
    }

    /**
     * Allows for setting common properties such as application ID, headers, proxy configuration, etc. It is
     * recommended to pass an instance of {@link HttpClientOptions} (a subclass of {@link ClientOptions}) for
     * HTTP-specific settings. Ignored when an explicit {@link HttpPipeline} is supplied.
     *
     * @param clientOptions A configured instance of {@link HttpClientOptions}.
* @see HttpClientOptions * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code clientOptions} is {@code null}. */ @Override public EncryptedBlobClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null."); return this; } /** * Sets the {@link BlobServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version of the client library will have the result of potentially moving to a newer service version. * <p> * Targeting a specific service version may also mean that the service will return an error for newer APIs. * * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) { this.version = version; return this; } /** * Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the server. * * @param customerProvidedKey {@link CustomerProvidedKey} * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) { if (customerProvidedKey == null) { this.customerProvidedKey = null; } else { this.customerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return this; } /** * Sets the {@code encryption scope} that is used to encrypt blob contents on the server. * * @param encryptionScope Encryption scope containing the encryption key information. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder encryptionScope(String encryptionScope) { if (encryptionScope == null) { this.encryptionScope = null; } else { this.encryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope); } return this; } /** * Configures the builder based on the passed {@link BlobClient}. This will set the {@link HttpPipeline}, * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying * pipeline should not already be configured for encryption/decryption. * * <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link * {@link * * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and * encryption scope properties from the provided client. To set CPK, please use * {@link * * @param blobClient BlobClient used to configure the builder. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code containerClient} is {@code null}. */ public EncryptedBlobClientBuilder blobClient(BlobClient blobClient) { Objects.requireNonNull(blobClient); return client(blobClient.getHttpPipeline(), blobClient.getBlobUrl(), blobClient.getServiceVersion()); } /** * Configures the builder based on the passed {@link BlobAsyncClient}. This will set the {@link HttpPipeline}, * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying * pipeline should not already be configured for encryption/decryption. * * <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link * {@link * * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and * encryption scope properties from the provided client. To set CPK, please use * {@link * * @param blobAsyncClient BlobAsyncClient used to configure the builder. 
* @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code containerClient} is {@code null}. */ public EncryptedBlobClientBuilder blobAsyncClient(BlobAsyncClient blobAsyncClient) { Objects.requireNonNull(blobAsyncClient); return client(blobAsyncClient.getHttpPipeline(), blobAsyncClient.getBlobUrl(), blobAsyncClient.getServiceVersion()); } /** * Helper method to transform a regular client into an encrypted client * * @param httpPipeline {@link HttpPipeline} * @param endpoint The endpoint. * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ private EncryptedBlobClientBuilder client(HttpPipeline httpPipeline, String endpoint, BlobServiceVersion version) { this.endpoint(endpoint); this.serviceVersion(version); return this.pipeline(httpPipeline); } /** * Sets the requires encryption option. * * @param requiresEncryption Whether encryption is enforced by this client. Client will throw if data is * downloaded and it is not encrypted. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder requiresEncryption(boolean requiresEncryption) { this.requiresEncryption = requiresEncryption; return this; } /** * Sets the encryption options for the blob. * * @param clientSideEncryptionOptions The {@link BlobClientSideEncryptionOptions} for the blob. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder clientSideEncryptionOptions( BlobClientSideEncryptionOptions clientSideEncryptionOptions) { this.clientSideEncryptionOptions = clientSideEncryptionOptions; return this; } }
Instead of setting the default encryption region length here, could we just make it a default on the encryption options class [here](https://github.com/Azure/azure-sdk-for-java/pull/41685/files#diff-8251f2361f9caaadf10fd5a3cfd57816bac7d8ef2f47519b48a11d9ff0b610c4R13)? I'd imagine we'd do something similar to [`DownloadRetryOptions`](https://github.com/Azure/azure-sdk-for-java/blob/33541c17a1556576702c0c688f6fe989d659382c/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/models/DownloadRetryOptions.java#L26). If that is something we can do, I think it would be beneficial: all default logic would be encapsulated in that class, especially if we plan to expand the use of the encryption options class to other methods.
/**
 * Creates an {@link EncryptedBlobAsyncClient} based on options set in the builder. Defaults the container to
 * {@code $root} when none was set and fills in default encryption options before constructing the client.
 *
 * @return an {@link EncryptedBlobAsyncClient} created from the configurations in this builder.
 * @throws NullPointerException If {@code blobName} is {@code null}.
 * @throws IllegalArgumentException If the key/keyResolver configuration is invalid.
 */
public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() {
    Objects.requireNonNull(blobName, "'blobName' cannot be null.");
    checkValidEncryptionParameters();
    /*
    Implicit and explicit root container access are functionally equivalent, but explicit references are easier
    to read and debug.
    */
    if (CoreUtils.isNullOrEmpty(containerName)) {
        containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME;
    }
    BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest();
    if (this.blobEncryptionOptions == null) {
        this.blobEncryptionOptions = new BlobEncryptionOptions();
    }
    // NOTE(review): defaulting the authenticated region length here duplicates option logic in the builder;
    // consider making GCM_ENCRYPTION_REGION_LENGTH the default inside the options class itself (similar to
    // DownloadRetryOptions) so all default logic is encapsulated in one place — TODO confirm with maintainers.
    if (this.blobEncryptionOptions.getAuthenticatedRegionDataLength() == 0) {
        this.blobEncryptionOptions.setAuthenticatedRegionDataLength(GCM_ENCRYPTION_REGION_LENGTH);
    }
    return new EncryptedBlobAsyncClient(addBlobUserAgentModificationPolicy(getHttpPipeline()), endpoint,
        serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope,
        keyWrapper, keyWrapAlgorithm, versionId, encryptionVersion, requiresEncryption, blobEncryptionOptions);
}
}
/**
 * Creates an {@link EncryptedBlobAsyncClient} based on options set in the builder. Defaults the container to
 * {@code $root} when none was set and substitutes default {@link BlobClientSideEncryptionOptions} when the caller
 * did not supply any (the options class carries its own defaults).
 *
 * @return an {@link EncryptedBlobAsyncClient} created from the configurations in this builder.
 * @throws NullPointerException If {@code blobName} is {@code null}.
 * @throws IllegalArgumentException If the key/keyResolver configuration is invalid.
 */
public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() {
    Objects.requireNonNull(blobName, "'blobName' cannot be null.");
    checkValidEncryptionParameters();
    /*
    Implicit and explicit root container access are functionally equivalent, but explicit references are easier
    to read and debug.
    */
    if (CoreUtils.isNullOrEmpty(containerName)) {
        containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME;
    }
    BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest();
    this.clientSideEncryptionOptions = this.clientSideEncryptionOptions == null
        ? new BlobClientSideEncryptionOptions() : this.clientSideEncryptionOptions;
    return new EncryptedBlobAsyncClient(addBlobUserAgentModificationPolicy(getHttpPipeline()), endpoint,
        serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope,
        keyWrapper, keyWrapAlgorithm, versionId, encryptionVersion, requiresEncryption, clientSideEncryptionOptions);
}
class EncryptedBlobClientBuilder implements TokenCredentialTrait<EncryptedBlobClientBuilder>, ConnectionStringTrait<EncryptedBlobClientBuilder>, AzureNamedKeyCredentialTrait<EncryptedBlobClientBuilder>, AzureSasCredentialTrait<EncryptedBlobClientBuilder>, HttpTrait<EncryptedBlobClientBuilder>, ConfigurationTrait<EncryptedBlobClientBuilder>, EndpointTrait<EncryptedBlobClientBuilder> { private static final ClientLogger LOGGER = new ClientLogger(EncryptedBlobClientBuilder.class); private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-storage-blob-cryptography.properties"); private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final String CLIENT_NAME = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName"); private static final String CLIENT_VERSION = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion"); private static final String BLOB_CLIENT_NAME = USER_AGENT_PROPERTIES.getOrDefault(SDK_NAME, "UnknownName"); private static final String BLOB_CLIENT_VERSION = USER_AGENT_PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion"); private static final String USER_AGENT_MODIFICATION_REGEX = "(.*? 
)?(azsdk-java-azure-storage-blob/12\\.\\d{1,2}\\.\\d{1,2}(?:-beta\\.\\d{1,2})?)( .*?)?"; private String endpoint; private String accountName; private String containerName; private String blobName; private String snapshot; private String versionId; private boolean requiresEncryption; private final EncryptionVersion encryptionVersion; private StorageSharedKeyCredential storageSharedKeyCredential; private TokenCredential tokenCredential; private AzureSasCredential azureSasCredential; private String sasToken; private HttpClient httpClient; private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private HttpLogOptions logOptions; private RequestRetryOptions retryOptions; private RetryOptions coreRetryOptions; private HttpPipeline httpPipeline; private ClientOptions clientOptions = new ClientOptions(); private Configuration configuration; private AsyncKeyEncryptionKey keyWrapper; private AsyncKeyEncryptionKeyResolver keyResolver; private String keyWrapAlgorithm; private BlobServiceVersion version; private CpkInfo customerProvidedKey; private EncryptionScope encryptionScope; private BlobEncryptionOptions blobEncryptionOptions; /** * Creates a new instance of the EncryptedBlobClientBuilder * @deprecated Use {@link EncryptedBlobClientBuilder */ @Deprecated public EncryptedBlobClientBuilder() { logOptions = getDefaultHttpLogOptions(); this.encryptionVersion = EncryptionVersion.V1; LOGGER.warning("Client is being configured to use v1 of client side encryption, " + "which is no longer considered secure. The default is v1 for compatibility reasons, but it is highly" + "recommended the version be set to v2 using the constructor"); } /** * Creates a new instance of the EncryptedBlobClientbuilder. * * @param version The version of the client side encryption protocol to use. 
It is highly recommended that v2 be * preferred for security reasons, though v1 continues to be supported for compatibility reasons. Note that even a * client configured to encrypt using v2 can decrypt blobs that use the v1 protocol. */ @SuppressWarnings("deprecation") public EncryptedBlobClientBuilder(EncryptionVersion version) { Objects.requireNonNull(version); logOptions = getDefaultHttpLogOptions(); this.encryptionVersion = version; if (EncryptionVersion.V1.equals(this.encryptionVersion)) { LOGGER.warning("Client is being configured to use v1 of client side encryption, " + "which is no longer considered secure. The default is v1 for compatibility reasons, but it is highly" + "recommended the version be set to v2 using the constructor"); } } /** * Creates a {@link EncryptedBlobClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient --> * <pre> * EncryptedBlobAsyncClient client = new EncryptedBlobClientBuilder& * .key& * .keyResolver& * .connectionString& * .containerName& * .blobName& * .buildEncryptedBlobAsyncClient& * </pre> * <!-- end com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient --> * * @return a {@link EncryptedBlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. * @throws IllegalStateException If multiple credentials have been specified. * @throws IllegalStateException If both {@link * and {@link */ public EncryptedBlobClient buildEncryptedBlobClient() { return new EncryptedBlobClient(buildEncryptedBlobAsyncClient()); } /** * Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient --> * <pre> * EncryptedBlobClient client = new EncryptedBlobClientBuilder& * .key& * .keyResolver& * .connectionString& * .containerName& * .blobName& * .buildEncryptedBlobClient& * </pre> * <!-- end com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient --> * * @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. * @throws IllegalStateException If multiple credentials have been specified. * @throws IllegalStateException If both {@link * and {@link */ private HttpPipeline addBlobUserAgentModificationPolicy(HttpPipeline pipeline) { List<HttpPipelinePolicy> policies = new ArrayList<>(); for (int i = 0; i < pipeline.getPolicyCount(); i++) { HttpPipelinePolicy currPolicy = pipeline.getPolicy(i); policies.add(currPolicy); if (currPolicy instanceof UserAgentPolicy) { policies.add(new BlobUserAgentModificationPolicy(CLIENT_NAME, CLIENT_VERSION)); } } return new HttpPipelineBuilder() .httpClient(pipeline.getHttpClient()) .policies(policies.toArray(new HttpPipelinePolicy[0])) .tracer(pipeline.getTracer()) .build(); } private String modifyUserAgentString(String applicationId, Configuration userAgentConfiguration) { Pattern pattern = Pattern.compile(USER_AGENT_MODIFICATION_REGEX); String userAgent = UserAgentUtil.toUserAgentString(applicationId, BLOB_CLIENT_NAME, BLOB_CLIENT_VERSION, userAgentConfiguration); Matcher matcher = pattern.matcher(userAgent); String version = encryptionVersion == EncryptionVersion.V2 ? "2.0" : "1.0"; String stringToAppend = "azstorage-clientsideencryption/" + version; if (matcher.matches() && !userAgent.contains(stringToAppend)) { String segment1 = matcher.group(1) == null ? 
"" : matcher.group(1); String segment2 = matcher.group(2) == null ? "" : matcher.group(2); String segment3 = matcher.group(3) == null ? "" : matcher.group(3); userAgent = segment1 + stringToAppend + " " + segment2 + segment3; } return userAgent; } private HttpPipeline getHttpPipeline() { CredentialValidator.validateSingleCredentialIsPresent( storageSharedKeyCredential, tokenCredential, azureSasCredential, sasToken, LOGGER); if (httpPipeline != null) { List<HttpPipelinePolicy> policies = new ArrayList<>(); boolean decryptionPolicyPresent = false; for (int i = 0; i < httpPipeline.getPolicyCount(); i++) { HttpPipelinePolicy currPolicy = httpPipeline.getPolicy(i); if (currPolicy instanceof BlobDecryptionPolicy) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The passed pipeline was already" + " configured for encryption/decryption in a way that might conflict with the passed key " + "information. Please ensure that the passed pipeline is not already configured for " + "encryption/decryption")); } policies.add(currPolicy); } policies.add(0, new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption)); return new HttpPipelineBuilder() .httpClient(httpPipeline.getHttpClient()) .tracer(httpPipeline.getTracer()) .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } Configuration userAgentConfiguration = (configuration == null) ? 
Configuration.NONE : configuration; List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption)); String applicationId = CoreUtils.getApplicationId(clientOptions, logOptions); String modifiedUserAgent = modifyUserAgentString(applicationId, userAgentConfiguration); policies.add(new UserAgentPolicy(modifiedUserAgent)); policies.add(new RequestIdPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(BuilderUtils.createRetryPolicy(retryOptions, coreRetryOptions, LOGGER)); policies.add(new AddDatePolicy()); HttpHeaders headers = CoreUtils.createHttpHeadersFromClientOptions(clientOptions); if (headers != null) { policies.add(new AddHeadersPolicy(headers)); } policies.add(new MetadataValidationPolicy()); if (storageSharedKeyCredential != null) { policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential)); } else if (tokenCredential != null) { BuilderHelper.httpsValidation(tokenCredential, "bearer token", endpoint, LOGGER); policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, Constants.STORAGE_SCOPE)); } else if (azureSasCredential != null) { policies.add(new AzureSasCredentialPolicy(azureSasCredential, false)); } else if (sasToken != null) { policies.add(new AzureSasCredentialPolicy(new AzureSasCredential(sasToken), false)); } policies.addAll(perRetryPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new ResponseValidationPolicyBuilder() .addOptionalEcho(HttpHeaderName.X_MS_CLIENT_REQUEST_ID) .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256_HEADER_NAME) .build()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .tracer(createTracer(clientOptions)) .build(); } /** * Sets the encryption key parameters for the client * * 
@param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption key * @param keyWrapAlgorithm The {@link String} used to wrap the key. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) { this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; return this; } /** * Sets the encryption parameters for this client * * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) { this.keyResolver = keyResolver; return this; } private void checkValidEncryptionParameters() { if (this.keyWrapper == null && this.keyResolver == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null")); } if (this.keyWrapper != null && this.keyWrapAlgorithm == null) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key.")); } } /** * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. * * @param credential {@link StorageSharedKeyCredential}. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) { this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.tokenCredential = null; this.sasToken = null; return this; } /** * Sets the {@link AzureNamedKeyCredential} used to authorize requests sent to the service. * * @param credential {@link AzureNamedKeyCredential}. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. 
*/ @Override public EncryptedBlobClientBuilder credential(AzureNamedKeyCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); return credential(StorageSharedKeyCredential.fromAzureNamedKeyCredential(credential)); } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java * <a href="https: * documentation for more details on proper usage of the {@link TokenCredential} type. * * @param credential {@link TokenCredential} used to authorize requests sent to the service. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ @Override public EncryptedBlobClientBuilder credential(TokenCredential credential) { this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.storageSharedKeyCredential = null; this.sasToken = null; return this; } /** * Sets the SAS token used to authorize requests sent to the service. * * @param sasToken The SAS token to use for authenticating requests. This string should only be the query parameters * (with or without a leading '?') and not a full url. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code sasToken} is {@code null}. */ public EncryptedBlobClientBuilder sasToken(String sasToken) { this.sasToken = Objects.requireNonNull(sasToken, "'sasToken' cannot be null."); this.storageSharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Sets the {@link AzureSasCredential} used to authorize requests sent to the service. * * @param credential {@link AzureSasCredential} used to authorize requests sent to the service. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. 
*/ @Override public EncryptedBlobClientBuilder credential(AzureSasCredential credential) { this.azureSasCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); return this; } /** * Clears the credential used to authorize the request. * * <p>This is for blobs that are publicly accessible.</p> * * @return the updated EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder setAnonymousAccess() { this.storageSharedKeyCredential = null; this.tokenCredential = null; this.azureSasCredential = null; this.sasToken = null; return this; } /** * Sets the connection string to connect to the service. * * @param connectionString Connection string of the storage account. * @return the updated EncryptedBlobClientBuilder * @throws IllegalArgumentException If {@code connectionString} is invalid. */ @Override public EncryptedBlobClientBuilder connectionString(String connectionString) { StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, LOGGER); StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint(); if (endpoint == null || endpoint.getPrimaryUri() == null) { throw LOGGER .logExceptionAsError(new IllegalArgumentException( "connectionString missing required settings to derive blob service endpoint.")); } this.endpoint(endpoint.getPrimaryUri()); if (storageConnectionString.getAccountName() != null) { this.accountName = storageConnectionString.getAccountName(); } StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings(); if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) { this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(), authSettings.getAccount().getAccessKey())); } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) { this.sasToken(authSettings.getSasToken()); } return this; } /** * Sets the service endpoint, additionally parses it for information (SAS token, 
container name, blob name) * * <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob name * as the container name. With only one path element, it is impossible to distinguish between a container name and a * blob in the root container, so it is assumed to be the container name as this is much more common. When working * with blobs in the root container, it is best to set the endpoint to the account url and specify the blob name * separately using the {@link EncryptedBlobClientBuilder * * @param endpoint URL of the service * @return the updated EncryptedBlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. */ @Override public EncryptedBlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobUrlParts parts = BlobUrlParts.parse(url); this.accountName = parts.getAccountName(); this.endpoint = BuilderHelper.getEndpoint(parts); this.containerName = parts.getBlobContainerName() == null ? this.containerName : parts.getBlobContainerName(); this.blobName = parts.getBlobName() == null ? this.blobName : parts.getBlobName(); this.snapshot = parts.getSnapshot(); this.versionId = parts.getVersionId(); String sasToken = parts.getCommonSasQueryParameters().encode(); if (!CoreUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.", ex)); } return this; } /** * Sets the name of the container that contains the blob. * * @param containerName Name of the container. If the value {@code null} or empty the root container, {@code $root}, * will be used. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob. 
* * @param blobName Name of the blob. If the blob name contains special characters, pass in the url encoded version * of the blob name. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public EncryptedBlobClientBuilder blobName(String blobName) { this.blobName = Objects.requireNonNull(blobName, "'blobName' cannot be null."); return this; } /** * Sets the snapshot identifier of the blob. * * @param snapshot Snapshot identifier for the blob. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Sets the version identifier of the blob. * * @param versionId Version identifier for the blob, pass {@code null} to interact with the latest blob version. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder versionId(String versionId) { this.versionId = versionId; return this; } /** * Sets the {@link HttpClient} to use for sending and receiving requests to and from the service. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param httpClient The {@link HttpClient} to use for requests. 
* @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { LOGGER.info("'httpClient' is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; } /** * Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param pipelinePolicy A {@link HttpPipelinePolicy pipeline policy}. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. */ @Override public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null"); if (pipelinePolicy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) { perCallPolicies.add(pipelinePolicy); } else { perRetryPolicies.add(pipelinePolicy); } return this; } /** * Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from * the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. 
 * A configured {@link HttpPipeline} takes precedence over this and the other HttpTrait settings, which are then
 * ignored; refer to the documentation of types implementing this trait for the full set of implications.
 *
 * @param logOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests to
 * and from the service.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code logOptions} is {@code null}.
 */
@Override
public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) {
    this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
    return this;
}

/**
 * Gets the default Storage allowlist log headers and query parameters.
 *
 * @return the default http log options.
 */
public static HttpLogOptions getDefaultHttpLogOptions() {
    return BuilderHelper.getDefaultHttpLogOptions();
}

/**
 * Sets the configuration object used to retrieve environment configuration values during building of the client.
 *
 * @param configuration Configuration store used to retrieve environment configurations.
 * @return the updated EncryptedBlobClientBuilder object
 */
@Override
public EncryptedBlobClientBuilder configuration(Configuration configuration) {
    this.configuration = configuration;
    return this;
}

/**
 * Sets the request retry options for all the requests made through the client.
 * <p>
 * Setting this is mutually exclusive with using {@link #retryOptions(RetryOptions)}.
 *
 * @param retryOptions {@link RequestRetryOptions}.
 * @return the updated EncryptedBlobClientBuilder object.
 */
public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) {
    this.retryOptions = retryOptions;
    return this;
}

/**
 * Sets the {@link RetryOptions} for all the requests made through the client. Ignored when a full
 * {@link HttpPipeline} has been supplied via {@code pipeline(HttpPipeline)}.
 * <p>
 * Setting this is mutually exclusive with using {@link #retryOptions(RequestRetryOptions)}.
 * Consider using the storage-specific overload for finer control.
 *
 * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
 * @return the updated EncryptedBlobClientBuilder object
 */
@Override
public EncryptedBlobClientBuilder retryOptions(RetryOptions retryOptions) {
    // Stored separately from the storage-specific RequestRetryOptions; the two are reconciled
    // when the retry policy is created at pipeline-build time.
    this.coreRetryOptions = retryOptions;
    return this;
}

/**
 * Sets the {@link HttpPipeline} to use for the service client. When set, it takes precedence over all other
 * HttpTrait configuration (client, policies, retry, logging), which are then ignored.
 * The endpoint is NOT ignored when {@code pipeline} is set, as it is still required to address the service.
 *
 * @param httpPipeline The {@link HttpPipeline} to use for requests.
 * @return the updated EncryptedBlobClientBuilder object
 */
@Override
public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) {
    // Surface (at info level) that a previously-configured pipeline is being cleared; a null
    // pipeline means one will be constructed from the individual HttpTrait settings.
    if (this.httpPipeline != null && httpPipeline == null) {
        LOGGER.info("HttpPipeline is being set to 'null' when it was previously configured.");
    }
    this.httpPipeline = httpPipeline;
    return this;
}

/**
 * Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is
 * recommended that this method be called with an instance of the {@link HttpClientOptions} class (a subclass of
 * {@link ClientOptions}), which provides more configuration options suitable for HTTP clients.
 *
 * <p><strong>Note:</strong> ignored when a full {@link HttpPipeline} has been supplied.</p>
 *
 * @param clientOptions A configured instance of {@link HttpClientOptions}.
 * @see HttpClientOptions
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code clientOptions} is {@code null}.
 */
@Override
public EncryptedBlobClientBuilder clientOptions(ClientOptions clientOptions) {
    this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null.");
    return this;
}

/**
 * Sets the {@link BlobServiceVersion} that is used when making API requests.
 * <p>
 * If a service version is not provided, the latest service version known to this client library is used;
 * upgrading the library may therefore move to a newer service version.
 * <p>
 * Targeting a specific service version may also mean that the service will return an error for newer APIs.
 *
 * @param version {@link BlobServiceVersion} of the service to be used when making requests.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) {
    this.version = version;
    return this;
}

/**
 * Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the server.
 *
 * @param customerProvidedKey {@link CustomerProvidedKey}; pass {@code null} to clear a previously-set key.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) {
    if (customerProvidedKey == null) {
        this.customerProvidedKey = null;
    } else {
        // Converted to the wire-format CpkInfo model used by the generated service layer.
        this.customerProvidedKey = new CpkInfo()
            .setEncryptionKey(customerProvidedKey.getKey())
            .setEncryptionKeySha256(customerProvidedKey.getKeySha256())
            .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm());
    }
    return this;
}

/**
 * Sets the {@code encryption scope} that is used to encrypt blob contents on the server.
 *
 * @param encryptionScope Encryption scope containing the encryption key information.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder encryptionScope(String encryptionScope) {
    if (encryptionScope == null) {
        this.encryptionScope = null;
    } else {
        // Wrapped in the service model type expected by the generated layer.
        this.encryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope);
    }
    return this;
}

/**
 * Configures the builder based on the passed {@link BlobClient}. This will set the {@link HttpPipeline},
 * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying
 * pipeline should not already be configured for encryption/decryption.
 *
 * <p>If {@code pipeline} is set, all other settings are ignored, aside from endpoint and service version.</p>
 *
 * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and
 * encryption scope properties from the provided client. To set CPK, please use
 * {@link #customerProvidedKey(CustomerProvidedKey)}.</p>
 *
 * @param blobClient BlobClient used to configure the builder.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code blobClient} is {@code null}.
 */
public EncryptedBlobClientBuilder blobClient(BlobClient blobClient) {
    Objects.requireNonNull(blobClient);
    return client(blobClient.getHttpPipeline(), blobClient.getBlobUrl(), blobClient.getServiceVersion());
}

/**
 * Configures the builder based on the passed {@link BlobAsyncClient}. This will set the {@link HttpPipeline},
 * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying
 * pipeline should not already be configured for encryption/decryption.
 *
 * <p>If {@code pipeline} is set, all other settings are ignored, aside from endpoint and service version.</p>
 *
 * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and
 * encryption scope properties from the provided client. To set CPK, please use
 * {@link #customerProvidedKey(CustomerProvidedKey)}.</p>
 *
 * @param blobAsyncClient BlobAsyncClient used to configure the builder.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code blobAsyncClient} is {@code null}.
 */
public EncryptedBlobClientBuilder blobAsyncClient(BlobAsyncClient blobAsyncClient) {
    Objects.requireNonNull(blobAsyncClient);
    return client(blobAsyncClient.getHttpPipeline(), blobAsyncClient.getBlobUrl(),
        blobAsyncClient.getServiceVersion());
}

/**
 * Helper method to transform a regular client into an encrypted client.
 *
 * @param httpPipeline {@link HttpPipeline}
 * @param endpoint The endpoint.
 * @param version {@link BlobServiceVersion} of the service to be used when making requests.
 * @return the updated EncryptedBlobClientBuilder object
 */
private EncryptedBlobClientBuilder client(HttpPipeline httpPipeline, String endpoint, BlobServiceVersion version) {
    this.endpoint(endpoint);
    this.serviceVersion(version);
    return this.pipeline(httpPipeline);
}

/**
 * Sets the requires encryption option.
 *
 * @param requiresEncryption Whether encryption is enforced by this client. Client will throw if data is
 * downloaded and it is not encrypted.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder requiresEncryption(boolean requiresEncryption) {
    this.requiresEncryption = requiresEncryption;
    return this;
}

/**
 * Sets the encryption options for the blob.
 *
 * @param options The {@link BlobEncryptionOptions} for the blob.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder blobEncryptionOptions(BlobEncryptionOptions options) {
    this.blobEncryptionOptions = options;
    return this;
}
}
class EncryptedBlobClientBuilder implements TokenCredentialTrait<EncryptedBlobClientBuilder>, ConnectionStringTrait<EncryptedBlobClientBuilder>, AzureNamedKeyCredentialTrait<EncryptedBlobClientBuilder>, AzureSasCredentialTrait<EncryptedBlobClientBuilder>, HttpTrait<EncryptedBlobClientBuilder>, ConfigurationTrait<EncryptedBlobClientBuilder>, EndpointTrait<EncryptedBlobClientBuilder> { private static final ClientLogger LOGGER = new ClientLogger(EncryptedBlobClientBuilder.class); private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-storage-blob-cryptography.properties"); private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final String CLIENT_NAME = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName"); private static final String CLIENT_VERSION = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion"); private static final String BLOB_CLIENT_NAME = USER_AGENT_PROPERTIES.getOrDefault(SDK_NAME, "UnknownName"); private static final String BLOB_CLIENT_VERSION = USER_AGENT_PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion"); private static final String USER_AGENT_MODIFICATION_REGEX = "(.*? 
)?(azsdk-java-azure-storage-blob/12\\.\\d{1,2}\\.\\d{1,2}(?:-beta\\.\\d{1,2})?)( .*?)?"; private String endpoint; private String accountName; private String containerName; private String blobName; private String snapshot; private String versionId; private boolean requiresEncryption; private final EncryptionVersion encryptionVersion; private StorageSharedKeyCredential storageSharedKeyCredential; private TokenCredential tokenCredential; private AzureSasCredential azureSasCredential; private String sasToken; private HttpClient httpClient; private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private HttpLogOptions logOptions; private RequestRetryOptions retryOptions; private RetryOptions coreRetryOptions; private HttpPipeline httpPipeline; private ClientOptions clientOptions = new ClientOptions(); private Configuration configuration; private AsyncKeyEncryptionKey keyWrapper; private AsyncKeyEncryptionKeyResolver keyResolver; private String keyWrapAlgorithm; private BlobServiceVersion version; private CpkInfo customerProvidedKey; private EncryptionScope encryptionScope; private BlobClientSideEncryptionOptions clientSideEncryptionOptions; /** * Creates a new instance of the EncryptedBlobClientBuilder * @deprecated Use {@link EncryptedBlobClientBuilder */ @Deprecated public EncryptedBlobClientBuilder() { logOptions = getDefaultHttpLogOptions(); this.encryptionVersion = EncryptionVersion.V1; LOGGER.warning("Client is being configured to use v1 of client side encryption, " + "which is no longer considered secure. The default is v1 for compatibility reasons, but it is highly" + "recommended the version be set to v2 using the constructor"); } /** * Creates a new instance of the EncryptedBlobClientbuilder. * * @param version The version of the client side encryption protocol to use. 
It is highly recommended that v2 be * preferred for security reasons, though v1 continues to be supported for compatibility reasons. Note that even a * client configured to encrypt using v2 can decrypt blobs that use the v1 protocol. */ @SuppressWarnings("deprecation") public EncryptedBlobClientBuilder(EncryptionVersion version) { Objects.requireNonNull(version); logOptions = getDefaultHttpLogOptions(); this.encryptionVersion = version; if (EncryptionVersion.V1.equals(this.encryptionVersion)) { LOGGER.warning("Client is being configured to use v1 of client side encryption, " + "which is no longer considered secure. The default is v1 for compatibility reasons, but it is highly" + "recommended the version be set to v2 using the constructor"); } } /** * Creates a {@link EncryptedBlobClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient --> * <pre> * EncryptedBlobAsyncClient client = new EncryptedBlobClientBuilder& * .key& * .keyResolver& * .connectionString& * .containerName& * .blobName& * .buildEncryptedBlobAsyncClient& * </pre> * <!-- end com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient --> * * @return a {@link EncryptedBlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. * @throws IllegalStateException If multiple credentials have been specified. * @throws IllegalStateException If both {@link * and {@link */ public EncryptedBlobClient buildEncryptedBlobClient() { return new EncryptedBlobClient(buildEncryptedBlobAsyncClient()); } /** * Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient --> * <pre> * EncryptedBlobClient client = new EncryptedBlobClientBuilder& * .key& * .keyResolver& * .connectionString& * .containerName& * .blobName& * .buildEncryptedBlobClient& * </pre> * <!-- end com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient --> * * @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. * @throws IllegalStateException If multiple credentials have been specified. * @throws IllegalStateException If both {@link * and {@link */ private HttpPipeline addBlobUserAgentModificationPolicy(HttpPipeline pipeline) { List<HttpPipelinePolicy> policies = new ArrayList<>(); for (int i = 0; i < pipeline.getPolicyCount(); i++) { HttpPipelinePolicy currPolicy = pipeline.getPolicy(i); policies.add(currPolicy); if (currPolicy instanceof UserAgentPolicy) { policies.add(new BlobUserAgentModificationPolicy(CLIENT_NAME, CLIENT_VERSION)); } } return new HttpPipelineBuilder() .httpClient(pipeline.getHttpClient()) .policies(policies.toArray(new HttpPipelinePolicy[0])) .tracer(pipeline.getTracer()) .build(); } private String modifyUserAgentString(String applicationId, Configuration userAgentConfiguration) { Pattern pattern = Pattern.compile(USER_AGENT_MODIFICATION_REGEX); String userAgent = UserAgentUtil.toUserAgentString(applicationId, BLOB_CLIENT_NAME, BLOB_CLIENT_VERSION, userAgentConfiguration); Matcher matcher = pattern.matcher(userAgent); String version = encryptionVersion == EncryptionVersion.V2 ? "2.0" : "1.0"; String stringToAppend = "azstorage-clientsideencryption/" + version; if (matcher.matches() && !userAgent.contains(stringToAppend)) { String segment1 = matcher.group(1) == null ? 
"" : matcher.group(1); String segment2 = matcher.group(2) == null ? "" : matcher.group(2); String segment3 = matcher.group(3) == null ? "" : matcher.group(3); userAgent = segment1 + stringToAppend + " " + segment2 + segment3; } return userAgent; } private HttpPipeline getHttpPipeline() { CredentialValidator.validateSingleCredentialIsPresent( storageSharedKeyCredential, tokenCredential, azureSasCredential, sasToken, LOGGER); if (httpPipeline != null) { List<HttpPipelinePolicy> policies = new ArrayList<>(); boolean decryptionPolicyPresent = false; for (int i = 0; i < httpPipeline.getPolicyCount(); i++) { HttpPipelinePolicy currPolicy = httpPipeline.getPolicy(i); if (currPolicy instanceof BlobDecryptionPolicy) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The passed pipeline was already" + " configured for encryption/decryption in a way that might conflict with the passed key " + "information. Please ensure that the passed pipeline is not already configured for " + "encryption/decryption")); } policies.add(currPolicy); } policies.add(0, new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption)); return new HttpPipelineBuilder() .httpClient(httpPipeline.getHttpClient()) .tracer(httpPipeline.getTracer()) .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } Configuration userAgentConfiguration = (configuration == null) ? 
Configuration.NONE : configuration; List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption)); String applicationId = CoreUtils.getApplicationId(clientOptions, logOptions); String modifiedUserAgent = modifyUserAgentString(applicationId, userAgentConfiguration); policies.add(new UserAgentPolicy(modifiedUserAgent)); policies.add(new RequestIdPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(BuilderUtils.createRetryPolicy(retryOptions, coreRetryOptions, LOGGER)); policies.add(new AddDatePolicy()); HttpHeaders headers = CoreUtils.createHttpHeadersFromClientOptions(clientOptions); if (headers != null) { policies.add(new AddHeadersPolicy(headers)); } policies.add(new MetadataValidationPolicy()); if (storageSharedKeyCredential != null) { policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential)); } else if (tokenCredential != null) { BuilderHelper.httpsValidation(tokenCredential, "bearer token", endpoint, LOGGER); policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, Constants.STORAGE_SCOPE)); } else if (azureSasCredential != null) { policies.add(new AzureSasCredentialPolicy(azureSasCredential, false)); } else if (sasToken != null) { policies.add(new AzureSasCredentialPolicy(new AzureSasCredential(sasToken), false)); } policies.addAll(perRetryPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new ResponseValidationPolicyBuilder() .addOptionalEcho(HttpHeaderName.X_MS_CLIENT_REQUEST_ID) .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256_HEADER_NAME) .build()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .tracer(createTracer(clientOptions)) .build(); } /** * Sets the encryption key parameters for the client * * 
 * @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption key
 * @param keyWrapAlgorithm The {@link String} used to wrap the key.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) {
    this.keyWrapper = key;
    this.keyWrapAlgorithm = keyWrapAlgorithm;
    return this;
}

/**
 * Sets the encryption parameters for this client.
 *
 * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) {
    this.keyResolver = keyResolver;
    return this;
}

// Validates the key configuration at build time: at least one of key/resolver must be present,
// and a key always requires an accompanying wrap algorithm.
private void checkValidEncryptionParameters() {
    if (this.keyWrapper == null && this.keyResolver == null) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null"));
    }
    if (this.keyWrapper != null && this.keyWrapAlgorithm == null) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key."));
    }
}

/**
 * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service.
 *
 * @param credential {@link StorageSharedKeyCredential}.
 * @return the updated EncryptedBlobClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) {
    this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
    // Credentials are mutually exclusive; clear the alternatives.
    this.tokenCredential = null;
    this.sasToken = null;
    return this;
}

/**
 * Sets the {@link AzureNamedKeyCredential} used to authorize requests sent to the service.
 *
 * @param credential {@link AzureNamedKeyCredential}.
 * @return the updated EncryptedBlobClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
@Override
public EncryptedBlobClientBuilder credential(AzureNamedKeyCredential credential) {
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    // Delegates to the shared-key overload after conversion.
    return credential(StorageSharedKeyCredential.fromAzureNamedKeyCredential(credential));
}

/**
 * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
 * identity and authentication documentation for more details on proper usage of the {@link TokenCredential} type.
 *
 * @param credential {@link TokenCredential} used to authorize requests sent to the service.
 * @return the updated EncryptedBlobClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
@Override
public EncryptedBlobClientBuilder credential(TokenCredential credential) {
    this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
    // Credentials are mutually exclusive; clear the alternatives.
    this.storageSharedKeyCredential = null;
    this.sasToken = null;
    return this;
}

/**
 * Sets the SAS token used to authorize requests sent to the service.
 *
 * @param sasToken The SAS token to use for authenticating requests. This string should only be the query parameters
 * (with or without a leading '?') and not a full url.
 * @return the updated EncryptedBlobClientBuilder
 * @throws NullPointerException If {@code sasToken} is {@code null}.
 */
public EncryptedBlobClientBuilder sasToken(String sasToken) {
    this.sasToken = Objects.requireNonNull(sasToken, "'sasToken' cannot be null.");
    // Credentials are mutually exclusive; clear the alternatives.
    this.storageSharedKeyCredential = null;
    this.tokenCredential = null;
    return this;
}

/**
 * Sets the {@link AzureSasCredential} used to authorize requests sent to the service.
 *
 * @param credential {@link AzureSasCredential} used to authorize requests sent to the service.
 * @return the updated EncryptedBlobClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
@Override
public EncryptedBlobClientBuilder credential(AzureSasCredential credential) {
    this.azureSasCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
    return this;
}

/**
 * Clears the credential used to authorize the request.
 *
 * <p>This is for blobs that are publicly accessible.</p>
 *
 * @return the updated EncryptedBlobClientBuilder
 */
public EncryptedBlobClientBuilder setAnonymousAccess() {
    this.storageSharedKeyCredential = null;
    this.tokenCredential = null;
    this.azureSasCredential = null;
    this.sasToken = null;
    return this;
}

/**
 * Sets the connection string to connect to the service.
 *
 * @param connectionString Connection string of the storage account.
 * @return the updated EncryptedBlobClientBuilder
 * @throws IllegalArgumentException If {@code connectionString} is invalid.
 */
@Override
public EncryptedBlobClientBuilder connectionString(String connectionString) {
    StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, LOGGER);
    StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint();
    if (endpoint == null || endpoint.getPrimaryUri() == null) {
        throw LOGGER
            .logExceptionAsError(new IllegalArgumentException(
                "connectionString missing required settings to derive blob service endpoint."));
    }
    this.endpoint(endpoint.getPrimaryUri());
    if (storageConnectionString.getAccountName() != null) {
        this.accountName = storageConnectionString.getAccountName();
    }
    // Apply whichever auth style the connection string carries: account name/key or a SAS token.
    StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings();
    if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) {
        this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(),
            authSettings.getAccount().getAccessKey()));
    } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) {
        this.sasToken(authSettings.getSasToken());
    }
    return this;
}

/**
 * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name).
 *
 * <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob name
 * as the container name. With only one path element, it is impossible to distinguish between a container name and a
 * blob in the root container, so it is assumed to be the container name as this is much more common. When working
 * with blobs in the root container, it is best to set the endpoint to the account url and specify the blob name
 * separately using {@link EncryptedBlobClientBuilder#blobName(String)}.</p>
 *
 * @param endpoint URL of the service
 * @return the updated EncryptedBlobClientBuilder object
 * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL.
 */
@Override
public EncryptedBlobClientBuilder endpoint(String endpoint) {
    try {
        URL url = new URL(endpoint);
        BlobUrlParts parts = BlobUrlParts.parse(url);

        this.accountName = parts.getAccountName();
        this.endpoint = BuilderHelper.getEndpoint(parts);
        // Only overwrite container/blob names when the URL actually carries them.
        this.containerName =
            parts.getBlobContainerName() == null ? this.containerName : parts.getBlobContainerName();
        this.blobName = parts.getBlobName() == null ? this.blobName : parts.getBlobName();
        this.snapshot = parts.getSnapshot();
        this.versionId = parts.getVersionId();

        String sasToken = parts.getCommonSasQueryParameters().encode();
        if (!CoreUtils.isNullOrEmpty(sasToken)) {
            this.sasToken(sasToken);
        }
    } catch (MalformedURLException ex) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.", ex));
    }
    return this;
}

/**
 * Sets the name of the container that contains the blob.
 *
 * @param containerName Name of the container. If the value {@code null} or empty the root container, {@code $root},
 * will be used.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder containerName(String containerName) {
    this.containerName = containerName;
    return this;
}

/**
 * Sets the name of the blob.
 *
 * @param blobName Name of the blob. If the blob name contains special characters, pass in the url encoded version
 * of the blob name.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code blobName} is {@code null}
 */
public EncryptedBlobClientBuilder blobName(String blobName) {
    this.blobName = Objects.requireNonNull(blobName, "'blobName' cannot be null.");
    return this;
}

/**
 * Sets the snapshot identifier of the blob.
 *
 * @param snapshot Snapshot identifier for the blob.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder snapshot(String snapshot) {
    this.snapshot = snapshot;
    return this;
}

/**
 * Sets the version identifier of the blob.
 *
 * @param versionId Version identifier for the blob, pass {@code null} to interact with the latest blob version.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder versionId(String versionId) {
    this.versionId = versionId;
    return this;
}

/**
 * Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
 *
 * <p><strong>Note:</strong> if a full {@link HttpPipeline} is supplied via {@code pipeline(HttpPipeline)}, it
 * takes precedence over this and the other HttpTrait settings, which are then ignored.</p>
 *
 * @param httpClient The {@link HttpClient} to use for requests.
 * @return the updated EncryptedBlobClientBuilder object
 */
@Override
public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) {
    // Surface (at info level) that a previously-configured client is being cleared; a null
    // httpClient silently falls back to the default client when the pipeline is built.
    if (this.httpClient != null && httpClient == null) {
        LOGGER.info("'httpClient' is being set to 'null' when it was previously configured.");
    }
    this.httpClient = httpClient;
    return this;
}

/**
 * Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent. Ignored if a full
 * {@link HttpPipeline} is supplied via {@code pipeline(HttpPipeline)}.
 *
 * @param pipelinePolicy A {@link HttpPipelinePolicy pipeline policy}.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code pipelinePolicy} is {@code null}.
 */
@Override
public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) {
    Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null");
    // Pipeline position decides whether the policy runs once per logical request (PER_CALL)
    // or again on every retry attempt (PER_RETRY).
    if (pipelinePolicy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) {
        perCallPolicies.add(pipelinePolicy);
    } else {
        perRetryPolicies.add(pipelinePolicy);
    }
    return this;
}

/**
 * Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from
 * the service. If a {@code logLevel} is not provided, a default of {@link HttpLogDetailLevel} NONE applies.
 * Ignored when a full {@link HttpPipeline} has been supplied.
 *
 * @param logOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests to
 * and from the service.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code logOptions} is {@code null}.
 */
@Override
public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) {
    this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
    return this;
}

/**
 * Gets the default Storage allowlist log headers and query parameters.
 *
 * @return the default http log options.
 */
public static HttpLogOptions getDefaultHttpLogOptions() {
    return BuilderHelper.getDefaultHttpLogOptions();
}

/**
 * Sets the configuration object used to retrieve environment configuration values during building of the client.
 *
 * @param configuration Configuration store used to retrieve environment configurations.
 * @return the updated EncryptedBlobClientBuilder object
 */
@Override
public EncryptedBlobClientBuilder configuration(Configuration configuration) {
    this.configuration = configuration;
    return this;
}

/**
 * Sets the request retry options for all the requests made through the client.
 * <p>
 * Setting this is mutually exclusive with using {@link #retryOptions(RetryOptions)}.
 *
 * @param retryOptions {@link RequestRetryOptions}.
 * @return the updated EncryptedBlobClientBuilder object.
 */
public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) {
    this.retryOptions = retryOptions;
    return this;
}

/**
 * Sets the {@link RetryOptions} for all the requests made through the client. Ignored when a full
 * {@link HttpPipeline} has been supplied via {@code pipeline(HttpPipeline)}.
 * <p>
 * Setting this is mutually exclusive with using {@link #retryOptions(RequestRetryOptions)}.
 *
 * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
 * @return the updated EncryptedBlobClientBuilder object
 */
@Override
public EncryptedBlobClientBuilder retryOptions(RetryOptions retryOptions) {
    // Stored separately from the storage-specific RequestRetryOptions; the two are reconciled
    // when the retry policy is created at pipeline-build time.
    this.coreRetryOptions = retryOptions;
    return this;
}

/**
 * Sets the {@link HttpPipeline} to use for the service client. When set, it takes precedence over all other
 * HttpTrait configuration (client, policies, retry, logging), which are then ignored; a HTTP pipeline is
 * otherwise constructed internally based on the settings provided to this trait.
Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * The {@link * {@link * not ignored when {@code pipeline} is set. * * @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { LOGGER.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is * recommended that this method be called with an instance of the {@link HttpClientOptions} * class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more * configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait * interface. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param clientOptions A configured instance of {@link HttpClientOptions}. 
* @see HttpClientOptions * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code clientOptions} is {@code null}. */ @Override public EncryptedBlobClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null."); return this; } /** * Sets the {@link BlobServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version of the client library will have the result of potentially moving to a newer service version. * <p> * Targeting a specific service version may also mean that the service will return an error for newer APIs. * * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) { this.version = version; return this; } /** * Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the server. * * @param customerProvidedKey {@link CustomerProvidedKey} * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) { if (customerProvidedKey == null) { this.customerProvidedKey = null; } else { this.customerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return this; } /** * Sets the {@code encryption scope} that is used to encrypt blob contents on the server. * * @param encryptionScope Encryption scope containing the encryption key information. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder encryptionScope(String encryptionScope) { if (encryptionScope == null) { this.encryptionScope = null; } else { this.encryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope); } return this; } /** * Configures the builder based on the passed {@link BlobClient}. This will set the {@link HttpPipeline}, * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying * pipeline should not already be configured for encryption/decryption. * * <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link * {@link * * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and * encryption scope properties from the provided client. To set CPK, please use * {@link * * @param blobClient BlobClient used to configure the builder. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code containerClient} is {@code null}. */ public EncryptedBlobClientBuilder blobClient(BlobClient blobClient) { Objects.requireNonNull(blobClient); return client(blobClient.getHttpPipeline(), blobClient.getBlobUrl(), blobClient.getServiceVersion()); } /** * Configures the builder based on the passed {@link BlobAsyncClient}. This will set the {@link HttpPipeline}, * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying * pipeline should not already be configured for encryption/decryption. * * <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link * {@link * * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and * encryption scope properties from the provided client. To set CPK, please use * {@link * * @param blobAsyncClient BlobAsyncClient used to configure the builder. 
* @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code containerClient} is {@code null}. */ public EncryptedBlobClientBuilder blobAsyncClient(BlobAsyncClient blobAsyncClient) { Objects.requireNonNull(blobAsyncClient); return client(blobAsyncClient.getHttpPipeline(), blobAsyncClient.getBlobUrl(), blobAsyncClient.getServiceVersion()); } /** * Helper method to transform a regular client into an encrypted client * * @param httpPipeline {@link HttpPipeline} * @param endpoint The endpoint. * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ private EncryptedBlobClientBuilder client(HttpPipeline httpPipeline, String endpoint, BlobServiceVersion version) { this.endpoint(endpoint); this.serviceVersion(version); return this.pipeline(httpPipeline); } /** * Sets the requires encryption option. * * @param requiresEncryption Whether encryption is enforced by this client. Client will throw if data is * downloaded and it is not encrypted. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder requiresEncryption(boolean requiresEncryption) { this.requiresEncryption = requiresEncryption; return this; } /** * Sets the encryption options for the blob. * * @param clientSideEncryptionOptions The {@link BlobClientSideEncryptionOptions} for the blob. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder clientSideEncryptionOptions( BlobClientSideEncryptionOptions clientSideEncryptionOptions) { this.clientSideEncryptionOptions = clientSideEncryptionOptions; return this; } }
Let's remove this comment, along with the related comment that was added below it on line 336, since neither is needed anymore.
/**
 * Builds the {@link HttpPipeline} used by the encrypted blob clients created from this builder.
 * <p>
 * If a pipeline was explicitly supplied via {@code pipeline(HttpPipeline)}, its policies are reused with a
 * {@link BlobDecryptionPolicy} prepended at position 0; a supplied pipeline that already contains a decryption
 * policy is rejected, since it might conflict with the key information configured on this builder. Otherwise a
 * fresh pipeline is assembled from the builder's configured credential, retry, logging, header, and client
 * options.
 *
 * @return the {@link HttpPipeline} to use for service requests.
 * @throws IllegalArgumentException If the user-supplied pipeline already contains a {@link BlobDecryptionPolicy}.
 * @throws IllegalStateException If more than one credential type has been configured on this builder.
 */
private HttpPipeline getHttpPipeline() {
    // At most one credential style (shared key, token, AzureSasCredential, raw SAS token) may be set.
    CredentialValidator.validateSingleCredentialIsPresent(
        storageSharedKeyCredential, tokenCredential, azureSasCredential, sasToken, LOGGER);

    // A user-supplied pipeline is reused as-is, with decryption layered on top exactly once.
    if (httpPipeline != null) {
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        for (int i = 0; i < httpPipeline.getPolicyCount(); i++) {
            HttpPipelinePolicy currPolicy = httpPipeline.getPolicy(i);
            if (currPolicy instanceof BlobDecryptionPolicy) {
                throw LOGGER.logExceptionAsError(new IllegalArgumentException("The passed pipeline was already"
                    + " configured for encryption/decryption in a way that might conflict with the passed key "
                    + "information. Please ensure that the passed pipeline is not already configured for "
                    + "encryption/decryption"));
            }
            policies.add(currPolicy);
        }
        // Decryption must be the outermost policy so every other policy sees the response after it runs.
        policies.add(0, new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption));

        return new HttpPipelineBuilder()
            .httpClient(httpPipeline.getHttpClient())
            .tracer(httpPipeline.getTracer())
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .build();
    }

    Configuration userAgentConfiguration = (configuration == null) ? Configuration.NONE : configuration;

    List<HttpPipelinePolicy> policies = new ArrayList<>();

    policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption));

    String applicationId = CoreUtils.getApplicationId(clientOptions, logOptions);
    String modifiedUserAgent = modifyUserAgentString(applicationId, userAgentConfiguration);
    policies.add(new UserAgentPolicy(modifiedUserAgent));
    policies.add(new RequestIdPolicy());

    policies.addAll(perCallPolicies);
    HttpPolicyProviders.addBeforeRetryPolicies(policies);
    policies.add(BuilderUtils.createRetryPolicy(retryOptions, coreRetryOptions, LOGGER));

    policies.add(new AddDatePolicy());

    HttpHeaders headers = CoreUtils.createHttpHeadersFromClientOptions(clientOptions);
    if (headers != null) {
        policies.add(new AddHeadersPolicy(headers));
    }
    policies.add(new MetadataValidationPolicy());

    // Install exactly one authentication policy, matching whichever credential was configured (if any).
    if (storageSharedKeyCredential != null) {
        policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential));
    } else if (tokenCredential != null) {
        BuilderHelper.httpsValidation(tokenCredential, "bearer token", endpoint, LOGGER);
        policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, Constants.STORAGE_SCOPE));
    } else if (azureSasCredential != null) {
        policies.add(new AzureSasCredentialPolicy(azureSasCredential, false));
    } else if (sasToken != null) {
        policies.add(new AzureSasCredentialPolicy(new AzureSasCredential(sasToken), false));
    }

    policies.addAll(perRetryPolicies);
    HttpPolicyProviders.addAfterRetryPolicies(policies);

    policies.add(new ResponseValidationPolicyBuilder()
        .addOptionalEcho(HttpHeaderName.X_MS_CLIENT_REQUEST_ID)
        .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256_HEADER_NAME)
        .build());

    policies.add(new HttpLoggingPolicy(logOptions));
    policies.add(new ScrubEtagPolicy());

    return new HttpPipelineBuilder()
        .policies(policies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .tracer(createTracer(clientOptions))
        .build();
}
/**
 * Assembles the {@link HttpPipeline} that clients built by this builder will send requests through.
 * <p>
 * When the caller has provided an explicit pipeline, that pipeline's policies are copied and a
 * {@link BlobDecryptionPolicy} is inserted at the front; an explicit pipeline that already carries a decryption
 * policy is rejected because its key configuration could conflict with this builder's. When no pipeline was
 * provided, the method constructs one from the configured credential, retry, logging, and client options.
 *
 * @return the {@link HttpPipeline} for service requests.
 * @throws IllegalArgumentException If the caller-provided pipeline already contains a
 * {@link BlobDecryptionPolicy}.
 * @throws IllegalStateException If multiple credential types have been configured.
 */
private HttpPipeline getHttpPipeline() {
    // Reject ambiguous configurations: only one of shared key / token / SAS credential / SAS token is allowed.
    CredentialValidator.validateSingleCredentialIsPresent(
        storageSharedKeyCredential, tokenCredential, azureSasCredential, sasToken, LOGGER);

    if (httpPipeline != null) {
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        for (int i = 0; i < httpPipeline.getPolicyCount(); i++) {
            HttpPipelinePolicy currPolicy = httpPipeline.getPolicy(i);
            if (currPolicy instanceof BlobDecryptionPolicy) {
                throw LOGGER.logExceptionAsError(new IllegalArgumentException("The passed pipeline was already"
                    + " configured for encryption/decryption in a way that might conflict with the passed key "
                    + "information. Please ensure that the passed pipeline is not already configured for "
                    + "encryption/decryption"));
            }
            policies.add(currPolicy);
        }
        // Prepend decryption so it wraps every policy the caller configured.
        policies.add(0, new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption));

        return new HttpPipelineBuilder()
            .httpClient(httpPipeline.getHttpClient())
            .tracer(httpPipeline.getTracer())
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .build();
    }

    Configuration userAgentConfiguration = (configuration == null) ? Configuration.NONE : configuration;

    List<HttpPipelinePolicy> policies = new ArrayList<>();

    policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption));

    String applicationId = CoreUtils.getApplicationId(clientOptions, logOptions);
    String modifiedUserAgent = modifyUserAgentString(applicationId, userAgentConfiguration);
    policies.add(new UserAgentPolicy(modifiedUserAgent));
    policies.add(new RequestIdPolicy());

    policies.addAll(perCallPolicies);
    HttpPolicyProviders.addBeforeRetryPolicies(policies);
    policies.add(BuilderUtils.createRetryPolicy(retryOptions, coreRetryOptions, LOGGER));

    policies.add(new AddDatePolicy());

    HttpHeaders headers = CoreUtils.createHttpHeadersFromClientOptions(clientOptions);
    if (headers != null) {
        policies.add(new AddHeadersPolicy(headers));
    }
    policies.add(new MetadataValidationPolicy());

    // One authentication policy at most, chosen by whichever credential field is populated.
    if (storageSharedKeyCredential != null) {
        policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential));
    } else if (tokenCredential != null) {
        BuilderHelper.httpsValidation(tokenCredential, "bearer token", endpoint, LOGGER);
        policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, Constants.STORAGE_SCOPE));
    } else if (azureSasCredential != null) {
        policies.add(new AzureSasCredentialPolicy(azureSasCredential, false));
    } else if (sasToken != null) {
        policies.add(new AzureSasCredentialPolicy(new AzureSasCredential(sasToken), false));
    }

    policies.addAll(perRetryPolicies);
    HttpPolicyProviders.addAfterRetryPolicies(policies);

    policies.add(new ResponseValidationPolicyBuilder()
        .addOptionalEcho(HttpHeaderName.X_MS_CLIENT_REQUEST_ID)
        .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256_HEADER_NAME)
        .build());

    policies.add(new HttpLoggingPolicy(logOptions));
    policies.add(new ScrubEtagPolicy());

    return new HttpPipelineBuilder()
        .policies(policies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .tracer(createTracer(clientOptions))
        .build();
}
class EncryptedBlobClientBuilder implements TokenCredentialTrait<EncryptedBlobClientBuilder>, ConnectionStringTrait<EncryptedBlobClientBuilder>, AzureNamedKeyCredentialTrait<EncryptedBlobClientBuilder>, AzureSasCredentialTrait<EncryptedBlobClientBuilder>, HttpTrait<EncryptedBlobClientBuilder>, ConfigurationTrait<EncryptedBlobClientBuilder>, EndpointTrait<EncryptedBlobClientBuilder> { private static final ClientLogger LOGGER = new ClientLogger(EncryptedBlobClientBuilder.class); private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-storage-blob-cryptography.properties"); private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final String CLIENT_NAME = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName"); private static final String CLIENT_VERSION = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion"); private static final String BLOB_CLIENT_NAME = USER_AGENT_PROPERTIES.getOrDefault(SDK_NAME, "UnknownName"); private static final String BLOB_CLIENT_VERSION = USER_AGENT_PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion"); private static final String USER_AGENT_MODIFICATION_REGEX = "(.*? 
)?(azsdk-java-azure-storage-blob/12\\.\\d{1,2}\\.\\d{1,2}(?:-beta\\.\\d{1,2})?)( .*?)?"; private String endpoint; private String accountName; private String containerName; private String blobName; private String snapshot; private String versionId; private boolean requiresEncryption; private final EncryptionVersion encryptionVersion; private StorageSharedKeyCredential storageSharedKeyCredential; private TokenCredential tokenCredential; private AzureSasCredential azureSasCredential; private String sasToken; private HttpClient httpClient; private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private HttpLogOptions logOptions; private RequestRetryOptions retryOptions; private RetryOptions coreRetryOptions; private HttpPipeline httpPipeline; private ClientOptions clientOptions = new ClientOptions(); private Configuration configuration; private AsyncKeyEncryptionKey keyWrapper; private AsyncKeyEncryptionKeyResolver keyResolver; private String keyWrapAlgorithm; private BlobServiceVersion version; private CpkInfo customerProvidedKey; private EncryptionScope encryptionScope; private BlobEncryptionOptions blobEncryptionOptions; /** * Creates a new instance of the EncryptedBlobClientBuilder * @deprecated Use {@link EncryptedBlobClientBuilder */ @Deprecated public EncryptedBlobClientBuilder() { logOptions = getDefaultHttpLogOptions(); this.encryptionVersion = EncryptionVersion.V1; LOGGER.warning("Client is being configured to use v1 of client side encryption, " + "which is no longer considered secure. The default is v1 for compatibility reasons, but it is highly" + "recommended the version be set to v2 using the constructor"); } /** * Creates a new instance of the EncryptedBlobClientbuilder. * * @param version The version of the client side encryption protocol to use. 
It is highly recommended that v2 be * preferred for security reasons, though v1 continues to be supported for compatibility reasons. Note that even a * client configured to encrypt using v2 can decrypt blobs that use the v1 protocol. */ @SuppressWarnings("deprecation") public EncryptedBlobClientBuilder(EncryptionVersion version) { Objects.requireNonNull(version); logOptions = getDefaultHttpLogOptions(); this.encryptionVersion = version; if (EncryptionVersion.V1.equals(this.encryptionVersion)) { LOGGER.warning("Client is being configured to use v1 of client side encryption, " + "which is no longer considered secure. The default is v1 for compatibility reasons, but it is highly" + "recommended the version be set to v2 using the constructor"); } } /** * Creates a {@link EncryptedBlobClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient --> * <pre> * EncryptedBlobAsyncClient client = new EncryptedBlobClientBuilder& * .key& * .keyResolver& * .connectionString& * .containerName& * .blobName& * .buildEncryptedBlobAsyncClient& * </pre> * <!-- end com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient --> * * @return a {@link EncryptedBlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. * @throws IllegalStateException If multiple credentials have been specified. * @throws IllegalStateException If both {@link * and {@link */ public EncryptedBlobClient buildEncryptedBlobClient() { return new EncryptedBlobClient(buildEncryptedBlobAsyncClient()); } /** * Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient --> * <pre> * EncryptedBlobClient client = new EncryptedBlobClientBuilder& * .key& * .keyResolver& * .connectionString& * .containerName& * .blobName& * .buildEncryptedBlobClient& * </pre> * <!-- end com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient --> * * @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. * @throws IllegalStateException If multiple credentials have been specified. * @throws IllegalStateException If both {@link * and {@link */ public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() { Objects.requireNonNull(blobName, "'blobName' cannot be null."); checkValidEncryptionParameters(); /* Implicit and explicit root container access are functionally equivalent, but explicit references are easier to read and debug. */ if (CoreUtils.isNullOrEmpty(containerName)) { containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME; } BlobServiceVersion serviceVersion = version != null ? 
version : BlobServiceVersion.getLatest(); if (this.blobEncryptionOptions == null) { this.blobEncryptionOptions = new BlobEncryptionOptions(); } if (this.blobEncryptionOptions.getAuthenticatedRegionDataLength() == 0) { this.blobEncryptionOptions.setAuthenticatedRegionDataLength(GCM_ENCRYPTION_REGION_LENGTH); } return new EncryptedBlobAsyncClient(addBlobUserAgentModificationPolicy(getHttpPipeline()), endpoint, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope, keyWrapper, keyWrapAlgorithm, versionId, encryptionVersion, requiresEncryption, blobEncryptionOptions); } private HttpPipeline addBlobUserAgentModificationPolicy(HttpPipeline pipeline) { List<HttpPipelinePolicy> policies = new ArrayList<>(); for (int i = 0; i < pipeline.getPolicyCount(); i++) { HttpPipelinePolicy currPolicy = pipeline.getPolicy(i); policies.add(currPolicy); if (currPolicy instanceof UserAgentPolicy) { policies.add(new BlobUserAgentModificationPolicy(CLIENT_NAME, CLIENT_VERSION)); } } return new HttpPipelineBuilder() .httpClient(pipeline.getHttpClient()) .policies(policies.toArray(new HttpPipelinePolicy[0])) .tracer(pipeline.getTracer()) .build(); } private String modifyUserAgentString(String applicationId, Configuration userAgentConfiguration) { Pattern pattern = Pattern.compile(USER_AGENT_MODIFICATION_REGEX); String userAgent = UserAgentUtil.toUserAgentString(applicationId, BLOB_CLIENT_NAME, BLOB_CLIENT_VERSION, userAgentConfiguration); Matcher matcher = pattern.matcher(userAgent); String version = encryptionVersion == EncryptionVersion.V2 ? "2.0" : "1.0"; String stringToAppend = "azstorage-clientsideencryption/" + version; if (matcher.matches() && !userAgent.contains(stringToAppend)) { String segment1 = matcher.group(1) == null ? "" : matcher.group(1); String segment2 = matcher.group(2) == null ? "" : matcher.group(2); String segment3 = matcher.group(3) == null ? 
"" : matcher.group(3); userAgent = segment1 + stringToAppend + " " + segment2 + segment3; } return userAgent; } /** * Sets the encryption key parameters for the client * * @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption key * @param keyWrapAlgorithm The {@link String} used to wrap the key. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) { this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; return this; } /** * Sets the encryption parameters for this client * * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) { this.keyResolver = keyResolver; return this; } private void checkValidEncryptionParameters() { if (this.keyWrapper == null && this.keyResolver == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null")); } if (this.keyWrapper != null && this.keyWrapAlgorithm == null) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key.")); } } /** * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. * * @param credential {@link StorageSharedKeyCredential}. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) { this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.tokenCredential = null; this.sasToken = null; return this; } /** * Sets the {@link AzureNamedKeyCredential} used to authorize requests sent to the service. 
* * @param credential {@link AzureNamedKeyCredential}. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ @Override public EncryptedBlobClientBuilder credential(AzureNamedKeyCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); return credential(StorageSharedKeyCredential.fromAzureNamedKeyCredential(credential)); } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java * <a href="https: * documentation for more details on proper usage of the {@link TokenCredential} type. * * @param credential {@link TokenCredential} used to authorize requests sent to the service. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ @Override public EncryptedBlobClientBuilder credential(TokenCredential credential) { this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.storageSharedKeyCredential = null; this.sasToken = null; return this; } /** * Sets the SAS token used to authorize requests sent to the service. * * @param sasToken The SAS token to use for authenticating requests. This string should only be the query parameters * (with or without a leading '?') and not a full url. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code sasToken} is {@code null}. */ public EncryptedBlobClientBuilder sasToken(String sasToken) { this.sasToken = Objects.requireNonNull(sasToken, "'sasToken' cannot be null."); this.storageSharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Sets the {@link AzureSasCredential} used to authorize requests sent to the service. * * @param credential {@link AzureSasCredential} used to authorize requests sent to the service. 
* @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ @Override public EncryptedBlobClientBuilder credential(AzureSasCredential credential) { this.azureSasCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); return this; } /** * Clears the credential used to authorize the request. * * <p>This is for blobs that are publicly accessible.</p> * * @return the updated EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder setAnonymousAccess() { this.storageSharedKeyCredential = null; this.tokenCredential = null; this.azureSasCredential = null; this.sasToken = null; return this; } /** * Sets the connection string to connect to the service. * * @param connectionString Connection string of the storage account. * @return the updated EncryptedBlobClientBuilder * @throws IllegalArgumentException If {@code connectionString} is invalid. */ @Override public EncryptedBlobClientBuilder connectionString(String connectionString) { StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, LOGGER); StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint(); if (endpoint == null || endpoint.getPrimaryUri() == null) { throw LOGGER .logExceptionAsError(new IllegalArgumentException( "connectionString missing required settings to derive blob service endpoint.")); } this.endpoint(endpoint.getPrimaryUri()); if (storageConnectionString.getAccountName() != null) { this.accountName = storageConnectionString.getAccountName(); } StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings(); if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) { this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(), authSettings.getAccount().getAccessKey())); } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) { 
this.sasToken(authSettings.getSasToken()); } return this; } /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * * <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob name * as the container name. With only one path element, it is impossible to distinguish between a container name and a * blob in the root container, so it is assumed to be the container name as this is much more common. When working * with blobs in the root container, it is best to set the endpoint to the account url and specify the blob name * separately using the {@link EncryptedBlobClientBuilder * * @param endpoint URL of the service * @return the updated EncryptedBlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. */ @Override public EncryptedBlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobUrlParts parts = BlobUrlParts.parse(url); this.accountName = parts.getAccountName(); this.endpoint = BuilderHelper.getEndpoint(parts); this.containerName = parts.getBlobContainerName() == null ? this.containerName : parts.getBlobContainerName(); this.blobName = parts.getBlobName() == null ? this.blobName : parts.getBlobName(); this.snapshot = parts.getSnapshot(); this.versionId = parts.getVersionId(); String sasToken = parts.getCommonSasQueryParameters().encode(); if (!CoreUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.", ex)); } return this; } /** * Sets the name of the container that contains the blob. * * @param containerName Name of the container. If the value {@code null} or empty the root container, {@code $root}, * will be used. 
* @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder containerName(String containerName) {
        this.containerName = containerName;
        return this;
    }

    /**
     * Sets the name of the blob.
     *
     * @param blobName Name of the blob. If the blob name contains special characters, pass in the url encoded
     * version of the blob name.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code blobName} is {@code null}
     */
    public EncryptedBlobClientBuilder blobName(String blobName) {
        this.blobName = Objects.requireNonNull(blobName, "'blobName' cannot be null.");
        return this;
    }

    /**
     * Sets the snapshot identifier of the blob.
     *
     * @param snapshot Snapshot identifier for the blob.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder snapshot(String snapshot) {
        this.snapshot = snapshot;
        return this;
    }

    /**
     * Sets the version identifier of the blob.
     *
     * @param versionId Version identifier for the blob, pass {@code null} to interact with the latest blob
     * version.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder versionId(String versionId) {
        this.versionId = versionId;
        return this;
    }

    /**
     * Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
     *
     * <p><strong>Note:</strong> if a {@link HttpPipeline} is supplied via {@link #pipeline(HttpPipeline)}, it
     * takes precedence over this and the other HttpTrait settings, which are then ignored.</p>
     *
     * @param httpClient The {@link HttpClient} to use for requests.
     * @return the updated EncryptedBlobClientBuilder object
     */
    @Override
    public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) {
        if (this.httpClient != null && httpClient == null) {
            LOGGER.info("'httpClient' is being set to 'null' when it was previously configured.");
        }
        this.httpClient = httpClient;
        return this;
    }

    /**
     * Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent. Ignored when a full
     * {@link HttpPipeline} is supplied via {@link #pipeline(HttpPipeline)}.
     *
     * @param pipelinePolicy A {@link HttpPipelinePolicy pipeline policy}.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code pipelinePolicy} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) {
        Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null");
        // PER_CALL policies run once per request; everything else runs on each retry attempt.
        if (pipelinePolicy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) {
            perCallPolicies.add(pipelinePolicy);
        } else {
            perRetryPolicies.add(pipelinePolicy);
        }
        return this;
    }

    /**
     * Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and
     * from the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel#NONE}
     * is used. Ignored when a full {@link HttpPipeline} is supplied via {@link #pipeline(HttpPipeline)}.
     *
     * @param logOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving
     * requests to and from the service.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code logOptions} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) {
        this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
        return this;
    }

    /**
     * Gets the default Storage allowlist log headers and query parameters.
     *
     * @return the default http log options.
*/
    public static HttpLogOptions getDefaultHttpLogOptions() {
        return BuilderHelper.getDefaultHttpLogOptions();
    }

    /**
     * Sets the configuration object used to retrieve environment configuration values during building of the
     * client.
     *
     * @param configuration Configuration store used to retrieve environment configurations.
     * @return the updated EncryptedBlobClientBuilder object
     */
    @Override
    public EncryptedBlobClientBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }

    /**
     * Sets the request retry options for all the requests made through the client.
     * <p>
     * Setting this is mutually exclusive with using {@link #retryOptions(RetryOptions)}.
     *
     * @param retryOptions {@link RequestRetryOptions}.
     * @return the updated EncryptedBlobClientBuilder object.
     */
    public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) {
        this.retryOptions = retryOptions;
        return this;
    }

    /**
     * Sets the {@link RetryOptions} for all the requests made through the client. Ignored when a full
     * {@link HttpPipeline} is supplied via {@link #pipeline(HttpPipeline)}.
     * <p>
     * Setting this is mutually exclusive with using {@link #retryOptions(RequestRetryOptions)}.
     *
     * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
     * @return the updated EncryptedBlobClientBuilder object
     */
    @Override
    public EncryptedBlobClientBuilder retryOptions(RetryOptions retryOptions) {
        this.coreRetryOptions = retryOptions;
        return this;
    }

    /**
     * Sets the {@link HttpPipeline} to use for the service client. When set, it takes precedence over all other
     * HttpTrait settings on this builder; the {@link #endpoint(String) endpoint} is not ignored when
     * {@code pipeline} is set.
     *
     * @param httpPipeline The {@link HttpPipeline} to use for sending service requests and receiving responses.
     * @return the updated EncryptedBlobClientBuilder object
     */
    @Override
    public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) {
        if (this.httpPipeline != null && httpPipeline == null) {
            LOGGER.info("HttpPipeline is being set to 'null' when it was previously configured.");
        }
        this.httpPipeline = httpPipeline;
        return this;
    }

    /**
     * Allows for setting common properties such as application ID, headers, proxy configuration, etc. It is
     * recommended that this method be called with an instance of {@link HttpClientOptions} (a subclass of
     * {@link ClientOptions}), which provides more configuration options suitable for HTTP clients. Ignored when a
     * full {@link HttpPipeline} is supplied via {@link #pipeline(HttpPipeline)}.
     *
     * @param clientOptions A configured instance of {@link HttpClientOptions}.
     * @see HttpClientOptions
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code clientOptions} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder clientOptions(ClientOptions clientOptions) {
        this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null.");
        return this;
    }

    /**
     * Sets the {@link BlobServiceVersion} that is used when making API requests.
     * <p>
     * If a service version is not provided, the latest known service version for this client library version is
     * used, so upgrading the library may move to a newer service version. Targeting a specific service version
     * may also mean that the service will return an error for newer APIs.
     *
     * @param version {@link BlobServiceVersion} of the service to be used when making requests.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) {
        this.version = version;
        return this;
    }

    /**
     * Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the
     * server.
     *
     * @param customerProvidedKey {@link CustomerProvidedKey}; pass {@code null} to clear a previously set key.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) {
        if (customerProvidedKey == null) {
            this.customerProvidedKey = null;
        } else {
            this.customerProvidedKey = new CpkInfo()
                .setEncryptionKey(customerProvidedKey.getKey())
                .setEncryptionKeySha256(customerProvidedKey.getKeySha256())
                .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm());
        }
        return this;
    }

    /**
     * Sets the {@code encryption scope} that is used to encrypt blob contents on the server.
     *
     * @param encryptionScope Encryption scope containing the encryption key information; pass {@code null} to
     * clear a previously set scope.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder encryptionScope(String encryptionScope) {
        if (encryptionScope == null) {
            this.encryptionScope = null;
        } else {
            this.encryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope);
        }
        return this;
    }

    /**
     * Configures the builder based on the passed {@link BlobClient}. This will set the {@link HttpPipeline},
     * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the
     * underlying pipeline should not already be configured for encryption/decryption.
     *
     * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and
     * encryption scope properties from the provided client. Set them explicitly via
     * {@link #customerProvidedKey(CustomerProvidedKey)} and {@link #encryptionScope(String)}.</p>
     *
     * @param blobClient BlobClient used to configure the builder.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code blobClient} is {@code null}.
     */
    public EncryptedBlobClientBuilder blobClient(BlobClient blobClient) {
        Objects.requireNonNull(blobClient);
        return client(blobClient.getHttpPipeline(), blobClient.getBlobUrl(), blobClient.getServiceVersion());
    }

    /**
     * Configures the builder based on the passed {@link BlobAsyncClient}. This will set the {@link HttpPipeline},
     * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the
     * underlying pipeline should not already be configured for encryption/decryption.
     *
     * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and
     * encryption scope properties from the provided client. Set them explicitly via
     * {@link #customerProvidedKey(CustomerProvidedKey)} and {@link #encryptionScope(String)}.</p>
     *
     * @param blobAsyncClient BlobAsyncClient used to configure the builder.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code blobAsyncClient} is {@code null}.
     */
    public EncryptedBlobClientBuilder blobAsyncClient(BlobAsyncClient blobAsyncClient) {
        Objects.requireNonNull(blobAsyncClient);
        return client(blobAsyncClient.getHttpPipeline(), blobAsyncClient.getBlobUrl(),
            blobAsyncClient.getServiceVersion());
    }

    /**
     * Helper method to transform a regular client into an encrypted client.
     *
     * @param httpPipeline {@link HttpPipeline}
     * @param endpoint The endpoint.
     * @param version {@link BlobServiceVersion} of the service to be used when making requests.
     * @return the updated EncryptedBlobClientBuilder object
     */
    private EncryptedBlobClientBuilder client(HttpPipeline httpPipeline, String endpoint,
        BlobServiceVersion version) {
        this.endpoint(endpoint);
        this.serviceVersion(version);
        return this.pipeline(httpPipeline);
    }

    /**
     * Sets the requires encryption option.
     *
     * @param requiresEncryption Whether encryption is enforced by this client. Client will throw if data is
     * downloaded and it is not encrypted.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder requiresEncryption(boolean requiresEncryption) {
        this.requiresEncryption = requiresEncryption;
        return this;
    }

    /**
     * Sets the encryption options for the blob.
     *
     * @param options The {@link BlobEncryptionOptions} for the blob.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder blobEncryptionOptions(BlobEncryptionOptions options) {
        this.blobEncryptionOptions = options;
        return this;
    }
}
class EncryptedBlobClientBuilder implements TokenCredentialTrait<EncryptedBlobClientBuilder>,
    ConnectionStringTrait<EncryptedBlobClientBuilder>, AzureNamedKeyCredentialTrait<EncryptedBlobClientBuilder>,
    AzureSasCredentialTrait<EncryptedBlobClientBuilder>, HttpTrait<EncryptedBlobClientBuilder>,
    ConfigurationTrait<EncryptedBlobClientBuilder>, EndpointTrait<EncryptedBlobClientBuilder> {
    // NOTE(review): this chunk appears to contain a second declaration of EncryptedBlobClientBuilder following a
    // complete earlier copy — likely a file-chunking/concatenation artifact; confirm against the repository.

    private static final ClientLogger LOGGER = new ClientLogger(EncryptedBlobClientBuilder.class);

    // SDK name/version read from this artifact's properties file, used for telemetry headers.
    private static final Map<String, String> PROPERTIES =
        CoreUtils.getProperties("azure-storage-blob-cryptography.properties");
    private static final String SDK_NAME = "name";
    private static final String SDK_VERSION = "version";
    private static final String CLIENT_NAME = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
    private static final String CLIENT_VERSION = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");
    // Base azure-storage-blob name/version; USER_AGENT_PROPERTIES is presumably brought in via static import
    // from the base blob package — TODO confirm, its declaration is not visible in this chunk.
    private static final String BLOB_CLIENT_NAME = USER_AGENT_PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
    private static final String BLOB_CLIENT_VERSION =
        USER_AGENT_PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");
    // Matches a user-agent string containing the azure-storage-blob "azsdk" token so that the client-side
    // encryption marker can be inserted before it. Groups: (1) optional prefix, (2) the azsdk token, (3) suffix.
    private static final String USER_AGENT_MODIFICATION_REGEX =
        "(.*? )?(azsdk-java-azure-storage-blob/12\\.\\d{1,2}\\.\\d{1,2}(?:-beta\\.\\d{1,2})?)( .*?)?";

    private String endpoint;
    private String accountName;
    private String containerName;
    private String blobName;
    private String snapshot;
    private String versionId;
    private boolean requiresEncryption;
    private final EncryptionVersion encryptionVersion;
    private StorageSharedKeyCredential storageSharedKeyCredential;
    private TokenCredential tokenCredential;
    private AzureSasCredential azureSasCredential;
    private String sasToken;
    private HttpClient httpClient;
    private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
    private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
    private HttpLogOptions logOptions;
    private RequestRetryOptions retryOptions;
    private RetryOptions coreRetryOptions;
    private HttpPipeline httpPipeline;
    private ClientOptions clientOptions = new ClientOptions();
    private Configuration configuration;
    private AsyncKeyEncryptionKey keyWrapper;
    private AsyncKeyEncryptionKeyResolver keyResolver;
    private String keyWrapAlgorithm;
    private BlobServiceVersion version;
    private CpkInfo customerProvidedKey;
    private EncryptionScope encryptionScope;
    private BlobClientSideEncryptionOptions clientSideEncryptionOptions;

    /**
     * Creates a new instance of the EncryptedBlobClientBuilder using encryption protocol v1.
     *
     * @deprecated Use {@link EncryptedBlobClientBuilder#EncryptedBlobClientBuilder(EncryptionVersion)} and prefer
     * {@code EncryptionVersion.V2}; v1 is no longer considered secure.
     */
    @Deprecated
    public EncryptedBlobClientBuilder() {
        logOptions = getDefaultHttpLogOptions();
        this.encryptionVersion = EncryptionVersion.V1;
        // NOTE(review): the concatenation below is missing a space between "highly" and "recommended", so the
        // logged message reads "highlyrecommended". Message text left untouched here; fix as a code change.
        LOGGER.warning("Client is being configured to use v1 of client side encryption, "
            + "which is no longer considered secure. The default is v1 for compatibility reasons, but it is highly"
            + "recommended the version be set to v2 using the constructor");
    }

    /**
     * Creates a new instance of the EncryptedBlobClientBuilder.
     *
     * @param version The version of the client side encryption protocol to use. It is highly recommended that v2
     * be preferred for security reasons, though v1 continues to be supported for compatibility reasons. Note that
     * even a client configured to encrypt using v2 can decrypt blobs that use the v1 protocol.
     */
    @SuppressWarnings("deprecation")
    public EncryptedBlobClientBuilder(EncryptionVersion version) {
        Objects.requireNonNull(version);
        logOptions = getDefaultHttpLogOptions();
        this.encryptionVersion = version;
        if (EncryptionVersion.V1.equals(this.encryptionVersion)) {
            // NOTE(review): same missing space as in the deprecated constructor ("highly" + "recommended").
            LOGGER.warning("Client is being configured to use v1 of client side encryption, "
                + "which is no longer considered secure. The default is v1 for compatibility reasons, but it is highly"
                + "recommended the version be set to v2 using the constructor");
        }
    }

    /**
     * Creates a {@link EncryptedBlobClient} based on options set in the Builder.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <pre>
     * EncryptedBlobClient client = new EncryptedBlobClientBuilder(EncryptionVersion.V2)
     *     .key(key, keyWrapAlgorithm)
     *     .keyResolver(keyResolver)
     *     .connectionString(connectionString)
     *     .containerName(containerName)
     *     .blobName(blobName)
     *     .buildEncryptedBlobClient();
     * </pre>
     *
     * @return a {@link EncryptedBlobClient} created from the configurations in this builder.
     * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}.
     * @throws IllegalStateException If multiple credentials have been specified.
     * @throws IllegalStateException If both {@link #retryOptions(RetryOptions)} and
     * {@link #retryOptions(RequestRetryOptions)} have been set.
     */
    public EncryptedBlobClient buildEncryptedBlobClient() {
        return new EncryptedBlobClient(buildEncryptedBlobAsyncClient());
    }

    /**
     * Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder.
*
     * @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder.
     * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}.
     * @throws IllegalStateException If multiple credentials have been specified.
     * @throws IllegalStateException If both {@link #retryOptions(RetryOptions)} and
     * {@link #retryOptions(RequestRetryOptions)} have been set.
     */
    public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() {
        Objects.requireNonNull(blobName, "'blobName' cannot be null.");
        checkValidEncryptionParameters();
        /*
        Implicit and explicit root container access are functionally equivalent, but explicit references are
        easier to read and debug.
        */
        if (CoreUtils.isNullOrEmpty(containerName)) {
            containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME;
        }
        BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest();
        // Fall back to default client-side encryption options when none were configured.
        this.clientSideEncryptionOptions = this.clientSideEncryptionOptions == null
            ? new BlobClientSideEncryptionOptions() : this.clientSideEncryptionOptions;
        return new EncryptedBlobAsyncClient(addBlobUserAgentModificationPolicy(getHttpPipeline()), endpoint,
            serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope,
            keyWrapper, keyWrapAlgorithm, versionId, encryptionVersion, requiresEncryption,
            clientSideEncryptionOptions);
    }

    /**
     * Rebuilds the given pipeline with a {@link BlobUserAgentModificationPolicy} inserted directly after each
     * {@link UserAgentPolicy}, so requests advertise this cryptography package's name and version.
     */
    private HttpPipeline addBlobUserAgentModificationPolicy(HttpPipeline pipeline) {
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        for (int i = 0; i < pipeline.getPolicyCount(); i++) {
            HttpPipelinePolicy currPolicy = pipeline.getPolicy(i);
            policies.add(currPolicy);
            if (currPolicy instanceof UserAgentPolicy) {
                policies.add(new BlobUserAgentModificationPolicy(CLIENT_NAME, CLIENT_VERSION));
            }
        }
        return new HttpPipelineBuilder()
            .httpClient(pipeline.getHttpClient())
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .tracer(pipeline.getTracer())
            .build();
    }

    /**
     * Builds the user agent string and, when it matches the expected azure-storage-blob token, inserts an
     * "azstorage-clientsideencryption/&lt;1.0|2.0&gt;" marker before that token (if not already present).
     */
    private String modifyUserAgentString(String applicationId, Configuration userAgentConfiguration) {
        // NOTE(review): Pattern.compile is invoked on every call; consider caching as a static final Pattern.
        Pattern pattern = Pattern.compile(USER_AGENT_MODIFICATION_REGEX);
        String userAgent = UserAgentUtil.toUserAgentString(applicationId, BLOB_CLIENT_NAME, BLOB_CLIENT_VERSION,
            userAgentConfiguration);
        Matcher matcher = pattern.matcher(userAgent);
        String version = encryptionVersion == EncryptionVersion.V2 ? "2.0" : "1.0";
        String stringToAppend = "azstorage-clientsideencryption/" + version;
        if (matcher.matches() && !userAgent.contains(stringToAppend)) {
            String segment1 = matcher.group(1) == null ? "" : matcher.group(1);
            String segment2 = matcher.group(2) == null ? "" : matcher.group(2);
            String segment3 = matcher.group(3) == null ? "" : matcher.group(3);
            userAgent = segment1 + stringToAppend + " " + segment2 + segment3;
        }
        return userAgent;
    }

    /**
     * Sets the encryption key parameters for the client.
     *
     * @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content
     * encryption key
     * @param keyWrapAlgorithm The {@link String} used to wrap the key.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) {
        this.keyWrapper = key;
        this.keyWrapAlgorithm = keyWrapAlgorithm;
        return this;
    }

    /**
     * Sets the encryption parameters for this client.
     *
     * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) {
        this.keyResolver = keyResolver;
        return this;
    }

    /**
     * Validates that at least one of key/keyResolver is present, and that a wrap algorithm accompanies a key.
     */
    private void checkValidEncryptionParameters() {
        if (this.keyWrapper == null && this.keyResolver == null) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("Key and KeyResolver cannot both be null"));
        }
        if (this.keyWrapper != null && this.keyWrapAlgorithm == null) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key."));
        }
    }

    /**
     * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. Clears any
     * previously set token credential or SAS token.
     *
     * @param credential {@link StorageSharedKeyCredential}.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) {
        this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
        this.tokenCredential = null;
        this.sasToken = null;
        return this;
    }

    /**
     * Sets the {@link AzureNamedKeyCredential} used to authorize requests sent to the service.
*
     * @param credential {@link AzureNamedKeyCredential}.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder credential(AzureNamedKeyCredential credential) {
        Objects.requireNonNull(credential, "'credential' cannot be null.");
        return credential(StorageSharedKeyCredential.fromAzureNamedKeyCredential(credential));
    }

    /**
     * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for
     * Java identity and authentication documentation for more details on proper usage of the
     * {@link TokenCredential} type. Clears any previously set shared key credential or SAS token.
     *
     * @param credential {@link TokenCredential} used to authorize requests sent to the service.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder credential(TokenCredential credential) {
        this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
        this.storageSharedKeyCredential = null;
        this.sasToken = null;
        return this;
    }

    /**
     * Sets the SAS token used to authorize requests sent to the service. Clears any previously set shared key or
     * token credential.
     *
     * @param sasToken The SAS token to use for authenticating requests. This string should only be the query
     * parameters (with or without a leading '?') and not a full url.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code sasToken} is {@code null}.
     */
    public EncryptedBlobClientBuilder sasToken(String sasToken) {
        this.sasToken = Objects.requireNonNull(sasToken, "'sasToken' cannot be null.");
        this.storageSharedKeyCredential = null;
        this.tokenCredential = null;
        return this;
    }

    /**
     * Sets the {@link AzureSasCredential} used to authorize requests sent to the service.
     *
     * @param credential {@link AzureSasCredential} used to authorize requests sent to the service.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder credential(AzureSasCredential credential) {
        this.azureSasCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
        return this;
    }

    /**
     * Clears any credential previously configured on this builder.
     *
     * <p>This is for blobs that are publicly accessible.</p>
     *
     * @return the updated EncryptedBlobClientBuilder
     */
    public EncryptedBlobClientBuilder setAnonymousAccess() {
        this.storageSharedKeyCredential = null;
        this.tokenCredential = null;
        this.azureSasCredential = null;
        this.sasToken = null;
        return this;
    }

    /**
     * Sets the connection string to connect to the service. Derives the blob endpoint, account name and, when
     * present, a shared key or SAS token credential from the connection string settings.
     *
     * @param connectionString Connection string of the storage account.
     * @return the updated EncryptedBlobClientBuilder
     * @throws IllegalArgumentException If {@code connectionString} is invalid or lacks a blob endpoint.
     */
    @Override
    public EncryptedBlobClientBuilder connectionString(String connectionString) {
        StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, LOGGER);
        StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint();
        if (endpoint == null || endpoint.getPrimaryUri() == null) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                "connectionString missing required settings to derive blob service endpoint."));
        }
        // endpoint(String) also extracts account name, container/blob names and any SAS token from the URL.
        this.endpoint(endpoint.getPrimaryUri());
        if (storageConnectionString.getAccountName() != null) {
            this.accountName = storageConnectionString.getAccountName();
        }
        StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings();
        if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) {
            this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(),
                authSettings.getAccount().getAccessKey()));
        } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) {
            this.sasToken(authSettings.getSasToken());
        }
        return this;
    }

    /**
     * Sets the service endpoint, additionally parsing it for information (SAS token, container name, blob name).
     *
     * <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob
     * name as the container name. In that case, set the endpoint to the account url and specify the blob name
     * separately using {@link EncryptedBlobClientBuilder#blobName(String)}.</p>
     *
     * @param endpoint URL of the service
     * @return the updated EncryptedBlobClientBuilder object
     * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL.
     */
    @Override
    public EncryptedBlobClientBuilder endpoint(String endpoint) {
        try {
            URL url = new URL(endpoint);
            BlobUrlParts parts = BlobUrlParts.parse(url);
            this.accountName = parts.getAccountName();
            this.endpoint = BuilderHelper.getEndpoint(parts);
            // Only overwrite container/blob names when the URL actually carries them.
            this.containerName =
                parts.getBlobContainerName() == null ? this.containerName : parts.getBlobContainerName();
            this.blobName = parts.getBlobName() == null ? this.blobName : parts.getBlobName();
            this.snapshot = parts.getSnapshot();
            this.versionId = parts.getVersionId();
            String sasToken = parts.getCommonSasQueryParameters().encode();
            if (!CoreUtils.isNullOrEmpty(sasToken)) {
                this.sasToken(sasToken);
            }
        } catch (MalformedURLException ex) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.", ex));
        }
        return this;
    }

    /**
     * Sets the name of the container that contains the blob.
     *
     * @param containerName Name of the container. If the value {@code null} or empty the root container,
     * {@code $root}, will be used.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder containerName(String containerName) {
        this.containerName = containerName;
        return this;
    }

    /**
     * Sets the name of the blob.
     *
     * @param blobName Name of the blob. If the blob name contains special characters, pass in the url encoded
     * version of the blob name.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code blobName} is {@code null}
     */
    public EncryptedBlobClientBuilder blobName(String blobName) {
        this.blobName = Objects.requireNonNull(blobName, "'blobName' cannot be null.");
        return this;
    }

    /**
     * Sets the snapshot identifier of the blob.
     *
     * @param snapshot Snapshot identifier for the blob.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder snapshot(String snapshot) {
        this.snapshot = snapshot;
        return this;
    }

    /**
     * Sets the version identifier of the blob.
     *
     * @param versionId Version identifier for the blob, pass {@code null} to interact with the latest blob
     * version.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder versionId(String versionId) {
        this.versionId = versionId;
        return this;
    }

    /**
     * Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
     *
     * <p><strong>Note:</strong> if a {@link HttpPipeline} is supplied via {@link #pipeline(HttpPipeline)}, it
     * takes precedence over this and the other HttpTrait settings, which are then ignored.</p>
     *
     * @param httpClient The {@link HttpClient} to use for requests.
     * @return the updated EncryptedBlobClientBuilder object
     */
    @Override
    public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) {
        if (this.httpClient != null && httpClient == null) {
            LOGGER.info("'httpClient' is being set to 'null' when it was previously configured.");
        }
        this.httpClient = httpClient;
        return this;
    }

    /**
     * Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent. Ignored when a full
     * {@link HttpPipeline} is supplied via {@link #pipeline(HttpPipeline)}.
     *
     * @param pipelinePolicy A {@link HttpPipelinePolicy pipeline policy}.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code pipelinePolicy} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) {
        Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null");
        // PER_CALL policies run once per request; everything else runs on each retry attempt.
        if (pipelinePolicy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) {
            perCallPolicies.add(pipelinePolicy);
        } else {
            perRetryPolicies.add(pipelinePolicy);
        }
        return this;
    }

    /**
     * Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and
     * from the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel#NONE}
     * is used. Ignored when a full {@link HttpPipeline} is supplied via {@link #pipeline(HttpPipeline)}.
     *
     * @param logOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving
     * requests to and from the service.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code logOptions} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) {
        this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
        return this;
    }

    /**
     * Gets the default Storage allowlist log headers and query parameters.
     *
     * @return the default http log options.
*/ public static HttpLogOptions getDefaultHttpLogOptions() { return BuilderHelper.getDefaultHttpLogOptions(); } /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * <p> * Setting this is mutually exclusive with using {@link * * @param retryOptions {@link RequestRetryOptions}. * @return the updated EncryptedBlobClientBuilder object. */ public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** * Sets the {@link RetryOptions} for all the requests made through the client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * Setting this is mutually exclusive with using {@link * Consider using {@link * * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client. 
* @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder retryOptions(RetryOptions retryOptions) { this.coreRetryOptions = retryOptions; return this; } /** * Sets the {@link HttpPipeline} to use for the service client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * The {@link * {@link * not ignored when {@code pipeline} is set. * * @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { LOGGER.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is * recommended that this method be called with an instance of the {@link HttpClientOptions} * class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more * configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait * interface. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. 
In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param clientOptions A configured instance of {@link HttpClientOptions}. * @see HttpClientOptions * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code clientOptions} is {@code null}. */ @Override public EncryptedBlobClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null."); return this; } /** * Sets the {@link BlobServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version of the client library will have the result of potentially moving to a newer service version. * <p> * Targeting a specific service version may also mean that the service will return an error for newer APIs. * * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) { this.version = version; return this; } /** * Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the server. 
* * @param customerProvidedKey {@link CustomerProvidedKey} * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) { if (customerProvidedKey == null) { this.customerProvidedKey = null; } else { this.customerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return this; } /** * Sets the {@code encryption scope} that is used to encrypt blob contents on the server. * * @param encryptionScope Encryption scope containing the encryption key information. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder encryptionScope(String encryptionScope) { if (encryptionScope == null) { this.encryptionScope = null; } else { this.encryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope); } return this; } /** * Configures the builder based on the passed {@link BlobClient}. This will set the {@link HttpPipeline}, * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying * pipeline should not already be configured for encryption/decryption. * * <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link * {@link * * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and * encryption scope properties from the provided client. To set CPK, please use * {@link * * @param blobClient BlobClient used to configure the builder. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code containerClient} is {@code null}. 
*/ public EncryptedBlobClientBuilder blobClient(BlobClient blobClient) { Objects.requireNonNull(blobClient); return client(blobClient.getHttpPipeline(), blobClient.getBlobUrl(), blobClient.getServiceVersion()); } /** * Configures the builder based on the passed {@link BlobAsyncClient}. This will set the {@link HttpPipeline}, * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying * pipeline should not already be configured for encryption/decryption. * * <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link * {@link * * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and * encryption scope properties from the provided client. To set CPK, please use * {@link * * @param blobAsyncClient BlobAsyncClient used to configure the builder. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code containerClient} is {@code null}. */ public EncryptedBlobClientBuilder blobAsyncClient(BlobAsyncClient blobAsyncClient) { Objects.requireNonNull(blobAsyncClient); return client(blobAsyncClient.getHttpPipeline(), blobAsyncClient.getBlobUrl(), blobAsyncClient.getServiceVersion()); } /** * Helper method to transform a regular client into an encrypted client * * @param httpPipeline {@link HttpPipeline} * @param endpoint The endpoint. * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ private EncryptedBlobClientBuilder client(HttpPipeline httpPipeline, String endpoint, BlobServiceVersion version) { this.endpoint(endpoint); this.serviceVersion(version); return this.pipeline(httpPipeline); } /** * Sets the requires encryption option. * * @param requiresEncryption Whether encryption is enforced by this client. Client will throw if data is * downloaded and it is not encrypted. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder requiresEncryption(boolean requiresEncryption) { this.requiresEncryption = requiresEncryption; return this; } /** * Sets the encryption options for the blob. * * @param clientSideEncryptionOptions The {@link BlobClientSideEncryptionOptions} for the blob. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder clientSideEncryptionOptions( BlobClientSideEncryptionOptions clientSideEncryptionOptions) { this.clientSideEncryptionOptions = clientSideEncryptionOptions; return this; } }
Yes. I think we should add two more tests here: 1. A parameterized test where we provide valid region lengths and assert that `getAuthenticatedRegionDataLength` returns the value we passed in. 2. A test that confirms that when we instantiate a new encryption options object and do not set the authenticated region data length, the authenticated length value defaults to 4 MB.
public void illegalRegionLength(long regionLength) {
    // In this variant the region length is only validated when the client is built,
    // so the whole build sequence runs inside the throwing executable.
    assertThrows(IllegalArgumentException.class, () -> {
        EncryptedBlobClientBuilder builder = new EncryptedBlobClientBuilder(EncryptionVersion.V2);
        builder.blobEncryptionOptions(new BlobEncryptionOptions().setAuthenticatedRegionDataLength(regionLength));
        builder.buildEncryptedBlobClient();
    });
}
public void illegalRegionLength(long regionLength) {
    // The setter validates eagerly, so the throw is expected from the setter itself.
    assertThrows(IllegalArgumentException.class, () -> {
        BlobClientSideEncryptionOptions options = new BlobClientSideEncryptionOptions();
        options.setAuthenticatedRegionDataLengthInBytes(regionLength);
    });
}
class EncryptedBlobClientBuilderTests { private static final StorageSharedKeyCredential CREDENTIALS = new StorageSharedKeyCredential("accountName", "accountKey"); private static final String ENDPOINT = "https: private static final RequestRetryOptions REQUEST_RETRY_OPTIONS = new RequestRetryOptions(RetryPolicyType.FIXED, 2, 2, 1000L, 4000L, null); private static final RetryOptions CORE_RETRY_OPTIONS = new RetryOptions(new FixedDelayOptions(1, Duration.ofSeconds(1))); private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-storage-blob-cryptography.properties"); private static final String CLIENT_NAME = PROPERTIES.getOrDefault("name", "UnknownName"); private static final String CLIENT_VERSION = PROPERTIES.getOrDefault("version", "UnknownVersion"); private static final List<Header> CLIENT_OPTIONS_HEADERS; private static final Map<HttpHeaderName, String> HEADERS_MAP; static { CLIENT_OPTIONS_HEADERS = new ArrayList<>(); CLIENT_OPTIONS_HEADERS.add(new Header("custom", "header")); CLIENT_OPTIONS_HEADERS.add(new Header("Authorization", "notthis")); CLIENT_OPTIONS_HEADERS.add(new Header("User-Agent", "overwritten")); HEADERS_MAP = new LinkedHashMap<>(); HEADERS_MAP.put(HttpHeaderName.fromString("custom"), "header"); HEADERS_MAP.put(HttpHeaderName.AUTHORIZATION, "notthis"); HEADERS_MAP.put(HttpHeaderName.USER_AGENT, "overwritten"); } private byte[] randomData; @BeforeEach public void setup() { randomData = new byte[256]; new SecureRandom().nextBytes(randomData); } static HttpRequest request(String url) { return new HttpRequest(HttpMethod.HEAD, url); } /** * Tests that a new date will be applied to every retry when using the encrypted blob client builder's default * pipeline. 
*/ @Test public void encryptedBlobClientFreshDateOnRetry() { EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .containerName("container") .blobName("blob") .credential(CREDENTIALS) .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .httpClient(new FreshDateTestClient()) .retryOptions(REQUEST_RETRY_OPTIONS) .buildEncryptedBlobClient(); StepVerifier.create(encryptedBlobClient.getHttpPipeline().send(request(encryptedBlobClient.getBlobUrl()))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests that a user application id will be honored in the UA string when using the encrypted blob client builder's default * pipeline. */ @ParameterizedTest @CsvSource(value = {"log-options-id,,log-options-id", ",client-options-id,client-options-id", "log-options-id,client-options-id,client-options-id" /* Client options preferred over log options */}) public void encryptedBlobClientCustomApplicationIdInUAString(String logOptionsUA, String clientOptionsUA, String expectedUA) { EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .containerName("container") .blobName("blob") .credential(CREDENTIALS) .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .httpClient(new ApplicationIdUAStringTestClient(expectedUA)) .httpLogOptions(new HttpLogOptions().setApplicationId(logOptionsUA)) .clientOptions(new ClientOptions().setApplicationId(clientOptionsUA)) .buildEncryptedBlobClient(); StepVerifier.create(encryptedBlobClient.getHttpPipeline().send(request(encryptedBlobClient.getBlobUrl()))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests that custom headers will be honored when using the encrypted blob client builder's default * pipeline. 
*/ @Test public void encryptedBlobClientCustomHeadersClientOptions() { EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .containerName("container") .blobName("blob") .credential(CREDENTIALS) .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .httpClient(new ClientOptionsHeadersTestClient(HEADERS_MAP)) .clientOptions(new ClientOptions().setHeaders(CLIENT_OPTIONS_HEADERS)) .buildEncryptedBlobClient(); StepVerifier.create(encryptedBlobClient.getHttpPipeline().send(request(encryptedBlobClient.getBlobUrl()))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } @Test public void doesNotThrowOnAmbiguousCredentialsWithoutAzureSasCredential() { assertDoesNotThrow(() -> new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .containerName("container") .blobName("foo") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .credential(new StorageSharedKeyCredential("foo", "bar")) .credential(new MockTokenCredential()) .sasToken("foo") .buildEncryptedBlobClient()); } @Test public void throwsOnAmbiguousCredentialsWithAzureSasCredential() { assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .blobName("foo") .containerName("container") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .credential(new StorageSharedKeyCredential("foo", "bar")) .credential(new AzureSasCredential("foo")) .buildEncryptedBlobClient()); assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .blobName("foo") .containerName("container") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .credential(new MockTokenCredential()) .credential(new AzureSasCredential("foo")) .buildEncryptedBlobClient()); assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .blobName("foo") .containerName("container") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") 
.sasToken("foo") .credential(new AzureSasCredential("foo")) .buildEncryptedBlobClient()); assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder() .endpoint(ENDPOINT + "?sig=foo") .blobName("foo") .containerName("container") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .credential(new AzureSasCredential("foo")) .buildEncryptedBlobClient()); } @Test public void onlyOneRetryOptionsCanBeApplied() { assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .blobName("foo") .containerName("foo") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .retryOptions(REQUEST_RETRY_OPTIONS) .retryOptions(CORE_RETRY_OPTIONS) .buildEncryptedBlobClient()); } @Test public void constructFromBlobClientBlobUserAgentModificationPolicy() { BlobClient blobClient = new BlobClientBuilder() .endpoint(ENDPOINT) .credential(CREDENTIALS) .blobName("foo") .containerName("container") .httpClient(new UAStringTestClient("azsdk-java-azure-storage-blob/\\d+\\.\\d+\\.\\d+[-beta\\.\\d+]* azsdk-java-" + CLIENT_NAME + "/" + CLIENT_VERSION + " " + "(.)*")) .buildClient(); EncryptedBlobClient cryptoClient = new EncryptedBlobClientBuilder() .blobClient(blobClient) .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .buildEncryptedBlobClient(); sendAndValidateUserAgentHeader(cryptoClient.getHttpPipeline(), cryptoClient.getBlobUrl()); } @Test public void constructFromNoClientBlobUserAgentModificationPolicy() { EncryptedBlobClient cryptoClient = new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .blobName("foo") .containerName("container") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .credential(new AzureSasCredential("foo")) .httpClient(new UAStringTestClient("azstorage-clientsideencryption/1.0 azsdk-java-azure-storage-blob/\\d+\\.\\d+\\.\\d+[-beta\\.\\d+]* azsdk-java-" + CLIENT_NAME + "/" + CLIENT_VERSION + " " + "(.)*")) .buildEncryptedBlobClient(); 
sendAndValidateUserAgentHeader(cryptoClient.getHttpPipeline(), cryptoClient.getBlobUrl()); } private static Stream<Arguments> getNonEncodedBlobNameSupplier() { return Stream.of( Arguments.of("test%test"), Arguments.of("ab2a7d5f-b973-4222-83ba-d0581817a819 %Россия 한국 中国!?/file"), Arguments.of("%E6%96%91%E9%BB%9E"), Arguments.of("斑點")); } @ParameterizedTest @MethodSource("getNonEncodedBlobNameSupplier") public void getNonEncodedBlobName(String originalBlobName) { EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .containerName("container") .blobName(originalBlobName) .credential(CREDENTIALS) .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .buildEncryptedBlobClient(); assertEquals(encryptedBlobClient.getBlobName(), originalBlobName); String encodedName = Utility.urlEncode(originalBlobName); assertTrue(encryptedBlobClient.getBlobUrl().contains(encodedName)); } @ParameterizedTest @ValueSource(longs = { 0, -1, 15, 4L * Constants.GB }) private static void sendAndValidateUserAgentHeader(HttpPipeline pipeline, String url) { boolean foundPolicy = false; for (int i = 0; i < pipeline.getPolicyCount(); i++) { foundPolicy |= (pipeline.getPolicy(i) instanceof BlobUserAgentModificationPolicy); } assertTrue(foundPolicy); StepVerifier.create(pipeline.send(request(url))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } private static final class UAStringTestClient implements HttpClient { private final Pattern pattern; UAStringTestClient(String regex) { this.pattern = Pattern.compile(regex); } @Override public Mono<HttpResponse> send(HttpRequest request) { if (CoreUtils.isNullOrEmpty(request.getHeaders().getValue(HttpHeaderName.USER_AGENT))) { throw new RuntimeException("Failed to set 'User-Agent' header."); } Matcher matcher = pattern.matcher(request.getHeaders().getValue(HttpHeaderName.USER_AGENT)); assertTrue(matcher.matches()); return Mono.just(new MockHttpResponse(request, 200)); } } 
private static final class FreshDateTestClient implements HttpClient { private String firstDate; @Override public Mono<HttpResponse> send(HttpRequest request) { if (firstDate == null) { firstDate = request.getHeaders().getValue(HttpHeaderName.DATE); return Mono.error(new IOException("IOException!")); } assertNotEquals(firstDate, request.getHeaders().getValue(HttpHeaderName.DATE)); return Mono.just(new MockHttpResponse(request, 200)); } } private static final class ApplicationIdUAStringTestClient implements HttpClient { private final String expectedUA; ApplicationIdUAStringTestClient(String expectedUA) { this.expectedUA = expectedUA; } @Override public Mono<HttpResponse> send(HttpRequest request) { assertTrue(request.getHeaders().getValue(HttpHeaderName.USER_AGENT).startsWith(expectedUA)); return Mono.just(new MockHttpResponse(request, 200)); } } private static final class ClientOptionsHeadersTestClient implements HttpClient { private final Map<HttpHeaderName, String> headers; ClientOptionsHeadersTestClient(Map<HttpHeaderName, String> headers) { this.headers = headers; } @Override public Mono<HttpResponse> send(HttpRequest request) { headers.forEach((name, value) -> { if (CoreUtils.isNullOrEmpty(request.getHeaders().getValue(name))) { throw new RuntimeException("Failed to set custom header " + name); } if (name == HttpHeaderName.AUTHORIZATION) { if (Objects.equals(value, request.getHeaders().getValue(HttpHeaderName.AUTHORIZATION))) { throw new RuntimeException("Custom header " + name + " did not match expectation."); } } else { if (!Objects.equals(value, request.getHeaders().getValue(name))) { throw new RuntimeException("Custom header " + name + " did not match expectation."); } } }); return Mono.just(new MockHttpResponse(request, 200)); } } }
class EncryptedBlobClientBuilderTests { private static final StorageSharedKeyCredential CREDENTIALS = new StorageSharedKeyCredential("accountName", "accountKey"); private static final String ENDPOINT = "https: private static final RequestRetryOptions REQUEST_RETRY_OPTIONS = new RequestRetryOptions(RetryPolicyType.FIXED, 2, 2, 1000L, 4000L, null); private static final RetryOptions CORE_RETRY_OPTIONS = new RetryOptions(new FixedDelayOptions(1, Duration.ofSeconds(1))); private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-storage-blob-cryptography.properties"); private static final String CLIENT_NAME = PROPERTIES.getOrDefault("name", "UnknownName"); private static final String CLIENT_VERSION = PROPERTIES.getOrDefault("version", "UnknownVersion"); private static final List<Header> CLIENT_OPTIONS_HEADERS; private static final Map<HttpHeaderName, String> HEADERS_MAP; static { CLIENT_OPTIONS_HEADERS = new ArrayList<>(); CLIENT_OPTIONS_HEADERS.add(new Header("custom", "header")); CLIENT_OPTIONS_HEADERS.add(new Header("Authorization", "notthis")); CLIENT_OPTIONS_HEADERS.add(new Header("User-Agent", "overwritten")); HEADERS_MAP = new LinkedHashMap<>(); HEADERS_MAP.put(HttpHeaderName.fromString("custom"), "header"); HEADERS_MAP.put(HttpHeaderName.AUTHORIZATION, "notthis"); HEADERS_MAP.put(HttpHeaderName.USER_AGENT, "overwritten"); } private byte[] randomData; @BeforeEach public void setup() { randomData = new byte[256]; new SecureRandom().nextBytes(randomData); } static HttpRequest request(String url) { return new HttpRequest(HttpMethod.HEAD, url); } /** * Tests that a new date will be applied to every retry when using the encrypted blob client builder's default * pipeline. 
*/ @Test public void encryptedBlobClientFreshDateOnRetry() { EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .containerName("container") .blobName("blob") .credential(CREDENTIALS) .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .httpClient(new FreshDateTestClient()) .retryOptions(REQUEST_RETRY_OPTIONS) .buildEncryptedBlobClient(); StepVerifier.create(encryptedBlobClient.getHttpPipeline().send(request(encryptedBlobClient.getBlobUrl()))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests that a user application id will be honored in the UA string when using the encrypted blob client builder's default * pipeline. */ @ParameterizedTest @CsvSource(value = {"log-options-id,,log-options-id", ",client-options-id,client-options-id", "log-options-id,client-options-id,client-options-id" /* Client options preferred over log options */}) public void encryptedBlobClientCustomApplicationIdInUAString(String logOptionsUA, String clientOptionsUA, String expectedUA) { EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .containerName("container") .blobName("blob") .credential(CREDENTIALS) .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .httpClient(new ApplicationIdUAStringTestClient(expectedUA)) .httpLogOptions(new HttpLogOptions().setApplicationId(logOptionsUA)) .clientOptions(new ClientOptions().setApplicationId(clientOptionsUA)) .buildEncryptedBlobClient(); StepVerifier.create(encryptedBlobClient.getHttpPipeline().send(request(encryptedBlobClient.getBlobUrl()))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests that custom headers will be honored when using the encrypted blob client builder's default * pipeline. 
*/ @Test public void encryptedBlobClientCustomHeadersClientOptions() { EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .containerName("container") .blobName("blob") .credential(CREDENTIALS) .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .httpClient(new ClientOptionsHeadersTestClient(HEADERS_MAP)) .clientOptions(new ClientOptions().setHeaders(CLIENT_OPTIONS_HEADERS)) .buildEncryptedBlobClient(); StepVerifier.create(encryptedBlobClient.getHttpPipeline().send(request(encryptedBlobClient.getBlobUrl()))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } @Test public void doesNotThrowOnAmbiguousCredentialsWithoutAzureSasCredential() { assertDoesNotThrow(() -> new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .containerName("container") .blobName("foo") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .credential(new StorageSharedKeyCredential("foo", "bar")) .credential(new MockTokenCredential()) .sasToken("foo") .buildEncryptedBlobClient()); } @Test public void throwsOnAmbiguousCredentialsWithAzureSasCredential() { assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .blobName("foo") .containerName("container") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .credential(new StorageSharedKeyCredential("foo", "bar")) .credential(new AzureSasCredential("foo")) .buildEncryptedBlobClient()); assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .blobName("foo") .containerName("container") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .credential(new MockTokenCredential()) .credential(new AzureSasCredential("foo")) .buildEncryptedBlobClient()); assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .blobName("foo") .containerName("container") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") 
.sasToken("foo") .credential(new AzureSasCredential("foo")) .buildEncryptedBlobClient()); assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder() .endpoint(ENDPOINT + "?sig=foo") .blobName("foo") .containerName("container") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .credential(new AzureSasCredential("foo")) .buildEncryptedBlobClient()); } @Test public void onlyOneRetryOptionsCanBeApplied() { assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .blobName("foo") .containerName("foo") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .retryOptions(REQUEST_RETRY_OPTIONS) .retryOptions(CORE_RETRY_OPTIONS) .buildEncryptedBlobClient()); } @Test public void constructFromBlobClientBlobUserAgentModificationPolicy() { BlobClient blobClient = new BlobClientBuilder() .endpoint(ENDPOINT) .credential(CREDENTIALS) .blobName("foo") .containerName("container") .httpClient(new UAStringTestClient("azsdk-java-azure-storage-blob/\\d+\\.\\d+\\.\\d+[-beta\\.\\d+]* azsdk-java-" + CLIENT_NAME + "/" + CLIENT_VERSION + " " + "(.)*")) .buildClient(); EncryptedBlobClient cryptoClient = new EncryptedBlobClientBuilder() .blobClient(blobClient) .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .buildEncryptedBlobClient(); sendAndValidateUserAgentHeader(cryptoClient.getHttpPipeline(), cryptoClient.getBlobUrl()); } @Test public void constructFromNoClientBlobUserAgentModificationPolicy() { EncryptedBlobClient cryptoClient = new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .blobName("foo") .containerName("container") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .credential(new AzureSasCredential("foo")) .httpClient(new UAStringTestClient("azstorage-clientsideencryption/1.0 azsdk-java-azure-storage-blob/\\d+\\.\\d+\\.\\d+[-beta\\.\\d+]* azsdk-java-" + CLIENT_NAME + "/" + CLIENT_VERSION + " " + "(.)*")) .buildEncryptedBlobClient(); 
sendAndValidateUserAgentHeader(cryptoClient.getHttpPipeline(), cryptoClient.getBlobUrl()); } private static Stream<Arguments> getNonEncodedBlobNameSupplier() { return Stream.of( Arguments.of("test%test"), Arguments.of("ab2a7d5f-b973-4222-83ba-d0581817a819 %Россия 한국 中国!?/file"), Arguments.of("%E6%96%91%E9%BB%9E"), Arguments.of("斑點")); } @ParameterizedTest @MethodSource("getNonEncodedBlobNameSupplier") public void getNonEncodedBlobName(String originalBlobName) { EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .containerName("container") .blobName(originalBlobName) .credential(CREDENTIALS) .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .buildEncryptedBlobClient(); assertEquals(encryptedBlobClient.getBlobName(), originalBlobName); String encodedName = Utility.urlEncode(originalBlobName); assertTrue(encryptedBlobClient.getBlobUrl().contains(encodedName)); } @ParameterizedTest @ValueSource(longs = { 0, -1, 15, 4L * Constants.GB }) @ParameterizedTest @ValueSource(longs = { 16, 4 * Constants.KB, 4 * Constants.MB, Constants.GB }) public void encryptedRegionLength(long regionLength) { EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder(EncryptionVersion.V2) .blobName("foo") .containerName("container") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .clientSideEncryptionOptions(new BlobClientSideEncryptionOptions() .setAuthenticatedRegionDataLengthInBytes(regionLength)) .buildEncryptedBlobClient(); assertEquals(regionLength, encryptedBlobClient.getClientSideEncryptionOptions().getAuthenticatedRegionDataLengthInBytes()); } @Test public void encryptedRegionLengthDefault() { EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder(EncryptionVersion.V2) .blobName("foo") .containerName("container") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .buildEncryptedBlobClient(); assertEquals(4 * Constants.MB, 
encryptedBlobClient.getClientSideEncryptionOptions().getAuthenticatedRegionDataLengthInBytes()); } private static void sendAndValidateUserAgentHeader(HttpPipeline pipeline, String url) { boolean foundPolicy = false; for (int i = 0; i < pipeline.getPolicyCount(); i++) { foundPolicy |= (pipeline.getPolicy(i) instanceof BlobUserAgentModificationPolicy); } assertTrue(foundPolicy); StepVerifier.create(pipeline.send(request(url))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } private static final class UAStringTestClient implements HttpClient { private final Pattern pattern; UAStringTestClient(String regex) { this.pattern = Pattern.compile(regex); } @Override public Mono<HttpResponse> send(HttpRequest request) { if (CoreUtils.isNullOrEmpty(request.getHeaders().getValue(HttpHeaderName.USER_AGENT))) { throw new RuntimeException("Failed to set 'User-Agent' header."); } Matcher matcher = pattern.matcher(request.getHeaders().getValue(HttpHeaderName.USER_AGENT)); assertTrue(matcher.matches()); return Mono.just(new MockHttpResponse(request, 200)); } } private static final class FreshDateTestClient implements HttpClient { private String firstDate; @Override public Mono<HttpResponse> send(HttpRequest request) { if (firstDate == null) { firstDate = request.getHeaders().getValue(HttpHeaderName.DATE); return Mono.error(new IOException("IOException!")); } assertNotEquals(firstDate, request.getHeaders().getValue(HttpHeaderName.DATE)); return Mono.just(new MockHttpResponse(request, 200)); } } private static final class ApplicationIdUAStringTestClient implements HttpClient { private final String expectedUA; ApplicationIdUAStringTestClient(String expectedUA) { this.expectedUA = expectedUA; } @Override public Mono<HttpResponse> send(HttpRequest request) { assertTrue(request.getHeaders().getValue(HttpHeaderName.USER_AGENT).startsWith(expectedUA)); return Mono.just(new MockHttpResponse(request, 200)); } } private static final class 
ClientOptionsHeadersTestClient implements HttpClient { private final Map<HttpHeaderName, String> headers; ClientOptionsHeadersTestClient(Map<HttpHeaderName, String> headers) { this.headers = headers; } @Override public Mono<HttpResponse> send(HttpRequest request) { headers.forEach((name, value) -> { if (CoreUtils.isNullOrEmpty(request.getHeaders().getValue(name))) { throw new RuntimeException("Failed to set custom header " + name); } if (name == HttpHeaderName.AUTHORIZATION) { if (Objects.equals(value, request.getHeaders().getValue(HttpHeaderName.AUTHORIZATION))) { throw new RuntimeException("Custom header " + name + " did not match expectation."); } } else { if (!Objects.equals(value, request.getHeaders().getValue(name))) { throw new RuntimeException("Custom header " + name + " did not match expectation."); } } }); return Mono.just(new MockHttpResponse(request, 200)); } } }
For this test, we should just be able to instantiate just the encryption options class. We don't necessarily need to create an encrypted blob client to exercise the validation logic.
public void illegalRegionLength(long regionLength) { assertThrows(IllegalArgumentException.class, () -> new EncryptedBlobClientBuilder(EncryptionVersion.V2) .blobEncryptionOptions(new BlobEncryptionOptions().setAuthenticatedRegionDataLength(regionLength)) .buildEncryptedBlobClient()); }
.blobEncryptionOptions(new BlobEncryptionOptions().setAuthenticatedRegionDataLength(regionLength))
public void illegalRegionLength(long regionLength) { assertThrows(IllegalArgumentException.class, () -> new BlobClientSideEncryptionOptions() .setAuthenticatedRegionDataLengthInBytes(regionLength)); }
class EncryptedBlobClientBuilderTests { private static final StorageSharedKeyCredential CREDENTIALS = new StorageSharedKeyCredential("accountName", "accountKey"); private static final String ENDPOINT = "https: private static final RequestRetryOptions REQUEST_RETRY_OPTIONS = new RequestRetryOptions(RetryPolicyType.FIXED, 2, 2, 1000L, 4000L, null); private static final RetryOptions CORE_RETRY_OPTIONS = new RetryOptions(new FixedDelayOptions(1, Duration.ofSeconds(1))); private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-storage-blob-cryptography.properties"); private static final String CLIENT_NAME = PROPERTIES.getOrDefault("name", "UnknownName"); private static final String CLIENT_VERSION = PROPERTIES.getOrDefault("version", "UnknownVersion"); private static final List<Header> CLIENT_OPTIONS_HEADERS; private static final Map<HttpHeaderName, String> HEADERS_MAP; static { CLIENT_OPTIONS_HEADERS = new ArrayList<>(); CLIENT_OPTIONS_HEADERS.add(new Header("custom", "header")); CLIENT_OPTIONS_HEADERS.add(new Header("Authorization", "notthis")); CLIENT_OPTIONS_HEADERS.add(new Header("User-Agent", "overwritten")); HEADERS_MAP = new LinkedHashMap<>(); HEADERS_MAP.put(HttpHeaderName.fromString("custom"), "header"); HEADERS_MAP.put(HttpHeaderName.AUTHORIZATION, "notthis"); HEADERS_MAP.put(HttpHeaderName.USER_AGENT, "overwritten"); } private byte[] randomData; @BeforeEach public void setup() { randomData = new byte[256]; new SecureRandom().nextBytes(randomData); } static HttpRequest request(String url) { return new HttpRequest(HttpMethod.HEAD, url); } /** * Tests that a new date will be applied to every retry when using the encrypted blob client builder's default * pipeline. 
*/ @Test public void encryptedBlobClientFreshDateOnRetry() { EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .containerName("container") .blobName("blob") .credential(CREDENTIALS) .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .httpClient(new FreshDateTestClient()) .retryOptions(REQUEST_RETRY_OPTIONS) .buildEncryptedBlobClient(); StepVerifier.create(encryptedBlobClient.getHttpPipeline().send(request(encryptedBlobClient.getBlobUrl()))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests that a user application id will be honored in the UA string when using the encrypted blob client builder's default * pipeline. */ @ParameterizedTest @CsvSource(value = {"log-options-id,,log-options-id", ",client-options-id,client-options-id", "log-options-id,client-options-id,client-options-id" /* Client options preferred over log options */}) public void encryptedBlobClientCustomApplicationIdInUAString(String logOptionsUA, String clientOptionsUA, String expectedUA) { EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .containerName("container") .blobName("blob") .credential(CREDENTIALS) .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .httpClient(new ApplicationIdUAStringTestClient(expectedUA)) .httpLogOptions(new HttpLogOptions().setApplicationId(logOptionsUA)) .clientOptions(new ClientOptions().setApplicationId(clientOptionsUA)) .buildEncryptedBlobClient(); StepVerifier.create(encryptedBlobClient.getHttpPipeline().send(request(encryptedBlobClient.getBlobUrl()))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests that custom headers will be honored when using the encrypted blob client builder's default * pipeline. 
*/ @Test public void encryptedBlobClientCustomHeadersClientOptions() { EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .containerName("container") .blobName("blob") .credential(CREDENTIALS) .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .httpClient(new ClientOptionsHeadersTestClient(HEADERS_MAP)) .clientOptions(new ClientOptions().setHeaders(CLIENT_OPTIONS_HEADERS)) .buildEncryptedBlobClient(); StepVerifier.create(encryptedBlobClient.getHttpPipeline().send(request(encryptedBlobClient.getBlobUrl()))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } @Test public void doesNotThrowOnAmbiguousCredentialsWithoutAzureSasCredential() { assertDoesNotThrow(() -> new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .containerName("container") .blobName("foo") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .credential(new StorageSharedKeyCredential("foo", "bar")) .credential(new MockTokenCredential()) .sasToken("foo") .buildEncryptedBlobClient()); } @Test public void throwsOnAmbiguousCredentialsWithAzureSasCredential() { assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .blobName("foo") .containerName("container") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .credential(new StorageSharedKeyCredential("foo", "bar")) .credential(new AzureSasCredential("foo")) .buildEncryptedBlobClient()); assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .blobName("foo") .containerName("container") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .credential(new MockTokenCredential()) .credential(new AzureSasCredential("foo")) .buildEncryptedBlobClient()); assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .blobName("foo") .containerName("container") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") 
.sasToken("foo") .credential(new AzureSasCredential("foo")) .buildEncryptedBlobClient()); assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder() .endpoint(ENDPOINT + "?sig=foo") .blobName("foo") .containerName("container") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .credential(new AzureSasCredential("foo")) .buildEncryptedBlobClient()); } @Test public void onlyOneRetryOptionsCanBeApplied() { assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .blobName("foo") .containerName("foo") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .retryOptions(REQUEST_RETRY_OPTIONS) .retryOptions(CORE_RETRY_OPTIONS) .buildEncryptedBlobClient()); } @Test public void constructFromBlobClientBlobUserAgentModificationPolicy() { BlobClient blobClient = new BlobClientBuilder() .endpoint(ENDPOINT) .credential(CREDENTIALS) .blobName("foo") .containerName("container") .httpClient(new UAStringTestClient("azsdk-java-azure-storage-blob/\\d+\\.\\d+\\.\\d+[-beta\\.\\d+]* azsdk-java-" + CLIENT_NAME + "/" + CLIENT_VERSION + " " + "(.)*")) .buildClient(); EncryptedBlobClient cryptoClient = new EncryptedBlobClientBuilder() .blobClient(blobClient) .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .buildEncryptedBlobClient(); sendAndValidateUserAgentHeader(cryptoClient.getHttpPipeline(), cryptoClient.getBlobUrl()); } @Test public void constructFromNoClientBlobUserAgentModificationPolicy() { EncryptedBlobClient cryptoClient = new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .blobName("foo") .containerName("container") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .credential(new AzureSasCredential("foo")) .httpClient(new UAStringTestClient("azstorage-clientsideencryption/1.0 azsdk-java-azure-storage-blob/\\d+\\.\\d+\\.\\d+[-beta\\.\\d+]* azsdk-java-" + CLIENT_NAME + "/" + CLIENT_VERSION + " " + "(.)*")) .buildEncryptedBlobClient(); 
sendAndValidateUserAgentHeader(cryptoClient.getHttpPipeline(), cryptoClient.getBlobUrl()); } private static Stream<Arguments> getNonEncodedBlobNameSupplier() { return Stream.of( Arguments.of("test%test"), Arguments.of("ab2a7d5f-b973-4222-83ba-d0581817a819 %Россия 한국 中国!?/file"), Arguments.of("%E6%96%91%E9%BB%9E"), Arguments.of("斑點")); } @ParameterizedTest @MethodSource("getNonEncodedBlobNameSupplier") public void getNonEncodedBlobName(String originalBlobName) { EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .containerName("container") .blobName(originalBlobName) .credential(CREDENTIALS) .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .buildEncryptedBlobClient(); assertEquals(encryptedBlobClient.getBlobName(), originalBlobName); String encodedName = Utility.urlEncode(originalBlobName); assertTrue(encryptedBlobClient.getBlobUrl().contains(encodedName)); } @ParameterizedTest @ValueSource(longs = { 0, -1, 15, 4L * Constants.GB }) private static void sendAndValidateUserAgentHeader(HttpPipeline pipeline, String url) { boolean foundPolicy = false; for (int i = 0; i < pipeline.getPolicyCount(); i++) { foundPolicy |= (pipeline.getPolicy(i) instanceof BlobUserAgentModificationPolicy); } assertTrue(foundPolicy); StepVerifier.create(pipeline.send(request(url))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } private static final class UAStringTestClient implements HttpClient { private final Pattern pattern; UAStringTestClient(String regex) { this.pattern = Pattern.compile(regex); } @Override public Mono<HttpResponse> send(HttpRequest request) { if (CoreUtils.isNullOrEmpty(request.getHeaders().getValue(HttpHeaderName.USER_AGENT))) { throw new RuntimeException("Failed to set 'User-Agent' header."); } Matcher matcher = pattern.matcher(request.getHeaders().getValue(HttpHeaderName.USER_AGENT)); assertTrue(matcher.matches()); return Mono.just(new MockHttpResponse(request, 200)); } } 
private static final class FreshDateTestClient implements HttpClient { private String firstDate; @Override public Mono<HttpResponse> send(HttpRequest request) { if (firstDate == null) { firstDate = request.getHeaders().getValue(HttpHeaderName.DATE); return Mono.error(new IOException("IOException!")); } assertNotEquals(firstDate, request.getHeaders().getValue(HttpHeaderName.DATE)); return Mono.just(new MockHttpResponse(request, 200)); } } private static final class ApplicationIdUAStringTestClient implements HttpClient { private final String expectedUA; ApplicationIdUAStringTestClient(String expectedUA) { this.expectedUA = expectedUA; } @Override public Mono<HttpResponse> send(HttpRequest request) { assertTrue(request.getHeaders().getValue(HttpHeaderName.USER_AGENT).startsWith(expectedUA)); return Mono.just(new MockHttpResponse(request, 200)); } } private static final class ClientOptionsHeadersTestClient implements HttpClient { private final Map<HttpHeaderName, String> headers; ClientOptionsHeadersTestClient(Map<HttpHeaderName, String> headers) { this.headers = headers; } @Override public Mono<HttpResponse> send(HttpRequest request) { headers.forEach((name, value) -> { if (CoreUtils.isNullOrEmpty(request.getHeaders().getValue(name))) { throw new RuntimeException("Failed to set custom header " + name); } if (name == HttpHeaderName.AUTHORIZATION) { if (Objects.equals(value, request.getHeaders().getValue(HttpHeaderName.AUTHORIZATION))) { throw new RuntimeException("Custom header " + name + " did not match expectation."); } } else { if (!Objects.equals(value, request.getHeaders().getValue(name))) { throw new RuntimeException("Custom header " + name + " did not match expectation."); } } }); return Mono.just(new MockHttpResponse(request, 200)); } } }
class EncryptedBlobClientBuilderTests { private static final StorageSharedKeyCredential CREDENTIALS = new StorageSharedKeyCredential("accountName", "accountKey"); private static final String ENDPOINT = "https: private static final RequestRetryOptions REQUEST_RETRY_OPTIONS = new RequestRetryOptions(RetryPolicyType.FIXED, 2, 2, 1000L, 4000L, null); private static final RetryOptions CORE_RETRY_OPTIONS = new RetryOptions(new FixedDelayOptions(1, Duration.ofSeconds(1))); private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-storage-blob-cryptography.properties"); private static final String CLIENT_NAME = PROPERTIES.getOrDefault("name", "UnknownName"); private static final String CLIENT_VERSION = PROPERTIES.getOrDefault("version", "UnknownVersion"); private static final List<Header> CLIENT_OPTIONS_HEADERS; private static final Map<HttpHeaderName, String> HEADERS_MAP; static { CLIENT_OPTIONS_HEADERS = new ArrayList<>(); CLIENT_OPTIONS_HEADERS.add(new Header("custom", "header")); CLIENT_OPTIONS_HEADERS.add(new Header("Authorization", "notthis")); CLIENT_OPTIONS_HEADERS.add(new Header("User-Agent", "overwritten")); HEADERS_MAP = new LinkedHashMap<>(); HEADERS_MAP.put(HttpHeaderName.fromString("custom"), "header"); HEADERS_MAP.put(HttpHeaderName.AUTHORIZATION, "notthis"); HEADERS_MAP.put(HttpHeaderName.USER_AGENT, "overwritten"); } private byte[] randomData; @BeforeEach public void setup() { randomData = new byte[256]; new SecureRandom().nextBytes(randomData); } static HttpRequest request(String url) { return new HttpRequest(HttpMethod.HEAD, url); } /** * Tests that a new date will be applied to every retry when using the encrypted blob client builder's default * pipeline. 
*/ @Test public void encryptedBlobClientFreshDateOnRetry() { EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .containerName("container") .blobName("blob") .credential(CREDENTIALS) .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .httpClient(new FreshDateTestClient()) .retryOptions(REQUEST_RETRY_OPTIONS) .buildEncryptedBlobClient(); StepVerifier.create(encryptedBlobClient.getHttpPipeline().send(request(encryptedBlobClient.getBlobUrl()))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests that a user application id will be honored in the UA string when using the encrypted blob client builder's default * pipeline. */ @ParameterizedTest @CsvSource(value = {"log-options-id,,log-options-id", ",client-options-id,client-options-id", "log-options-id,client-options-id,client-options-id" /* Client options preferred over log options */}) public void encryptedBlobClientCustomApplicationIdInUAString(String logOptionsUA, String clientOptionsUA, String expectedUA) { EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .containerName("container") .blobName("blob") .credential(CREDENTIALS) .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .httpClient(new ApplicationIdUAStringTestClient(expectedUA)) .httpLogOptions(new HttpLogOptions().setApplicationId(logOptionsUA)) .clientOptions(new ClientOptions().setApplicationId(clientOptionsUA)) .buildEncryptedBlobClient(); StepVerifier.create(encryptedBlobClient.getHttpPipeline().send(request(encryptedBlobClient.getBlobUrl()))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests that custom headers will be honored when using the encrypted blob client builder's default * pipeline. 
*/ @Test public void encryptedBlobClientCustomHeadersClientOptions() { EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .containerName("container") .blobName("blob") .credential(CREDENTIALS) .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .httpClient(new ClientOptionsHeadersTestClient(HEADERS_MAP)) .clientOptions(new ClientOptions().setHeaders(CLIENT_OPTIONS_HEADERS)) .buildEncryptedBlobClient(); StepVerifier.create(encryptedBlobClient.getHttpPipeline().send(request(encryptedBlobClient.getBlobUrl()))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } @Test public void doesNotThrowOnAmbiguousCredentialsWithoutAzureSasCredential() { assertDoesNotThrow(() -> new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .containerName("container") .blobName("foo") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .credential(new StorageSharedKeyCredential("foo", "bar")) .credential(new MockTokenCredential()) .sasToken("foo") .buildEncryptedBlobClient()); } @Test public void throwsOnAmbiguousCredentialsWithAzureSasCredential() { assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .blobName("foo") .containerName("container") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .credential(new StorageSharedKeyCredential("foo", "bar")) .credential(new AzureSasCredential("foo")) .buildEncryptedBlobClient()); assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .blobName("foo") .containerName("container") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .credential(new MockTokenCredential()) .credential(new AzureSasCredential("foo")) .buildEncryptedBlobClient()); assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .blobName("foo") .containerName("container") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") 
.sasToken("foo") .credential(new AzureSasCredential("foo")) .buildEncryptedBlobClient()); assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder() .endpoint(ENDPOINT + "?sig=foo") .blobName("foo") .containerName("container") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .credential(new AzureSasCredential("foo")) .buildEncryptedBlobClient()); } @Test public void onlyOneRetryOptionsCanBeApplied() { assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .blobName("foo") .containerName("foo") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .retryOptions(REQUEST_RETRY_OPTIONS) .retryOptions(CORE_RETRY_OPTIONS) .buildEncryptedBlobClient()); } @Test public void constructFromBlobClientBlobUserAgentModificationPolicy() { BlobClient blobClient = new BlobClientBuilder() .endpoint(ENDPOINT) .credential(CREDENTIALS) .blobName("foo") .containerName("container") .httpClient(new UAStringTestClient("azsdk-java-azure-storage-blob/\\d+\\.\\d+\\.\\d+[-beta\\.\\d+]* azsdk-java-" + CLIENT_NAME + "/" + CLIENT_VERSION + " " + "(.)*")) .buildClient(); EncryptedBlobClient cryptoClient = new EncryptedBlobClientBuilder() .blobClient(blobClient) .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .buildEncryptedBlobClient(); sendAndValidateUserAgentHeader(cryptoClient.getHttpPipeline(), cryptoClient.getBlobUrl()); } @Test public void constructFromNoClientBlobUserAgentModificationPolicy() { EncryptedBlobClient cryptoClient = new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .blobName("foo") .containerName("container") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .credential(new AzureSasCredential("foo")) .httpClient(new UAStringTestClient("azstorage-clientsideencryption/1.0 azsdk-java-azure-storage-blob/\\d+\\.\\d+\\.\\d+[-beta\\.\\d+]* azsdk-java-" + CLIENT_NAME + "/" + CLIENT_VERSION + " " + "(.)*")) .buildEncryptedBlobClient(); 
sendAndValidateUserAgentHeader(cryptoClient.getHttpPipeline(), cryptoClient.getBlobUrl()); } private static Stream<Arguments> getNonEncodedBlobNameSupplier() { return Stream.of( Arguments.of("test%test"), Arguments.of("ab2a7d5f-b973-4222-83ba-d0581817a819 %Россия 한국 中国!?/file"), Arguments.of("%E6%96%91%E9%BB%9E"), Arguments.of("斑點")); } @ParameterizedTest @MethodSource("getNonEncodedBlobNameSupplier") public void getNonEncodedBlobName(String originalBlobName) { EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder() .endpoint(ENDPOINT) .containerName("container") .blobName(originalBlobName) .credential(CREDENTIALS) .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .buildEncryptedBlobClient(); assertEquals(encryptedBlobClient.getBlobName(), originalBlobName); String encodedName = Utility.urlEncode(originalBlobName); assertTrue(encryptedBlobClient.getBlobUrl().contains(encodedName)); } @ParameterizedTest @ValueSource(longs = { 0, -1, 15, 4L * Constants.GB }) @ParameterizedTest @ValueSource(longs = { 16, 4 * Constants.KB, 4 * Constants.MB, Constants.GB }) public void encryptedRegionLength(long regionLength) { EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder(EncryptionVersion.V2) .blobName("foo") .containerName("container") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .clientSideEncryptionOptions(new BlobClientSideEncryptionOptions() .setAuthenticatedRegionDataLengthInBytes(regionLength)) .buildEncryptedBlobClient(); assertEquals(regionLength, encryptedBlobClient.getClientSideEncryptionOptions().getAuthenticatedRegionDataLengthInBytes()); } @Test public void encryptedRegionLengthDefault() { EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder(EncryptionVersion.V2) .blobName("foo") .containerName("container") .key(new FakeKey("keyId", randomData), "keyWrapAlgorithm") .buildEncryptedBlobClient(); assertEquals(4 * Constants.MB, 
encryptedBlobClient.getClientSideEncryptionOptions().getAuthenticatedRegionDataLengthInBytes()); } private static void sendAndValidateUserAgentHeader(HttpPipeline pipeline, String url) { boolean foundPolicy = false; for (int i = 0; i < pipeline.getPolicyCount(); i++) { foundPolicy |= (pipeline.getPolicy(i) instanceof BlobUserAgentModificationPolicy); } assertTrue(foundPolicy); StepVerifier.create(pipeline.send(request(url))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } private static final class UAStringTestClient implements HttpClient { private final Pattern pattern; UAStringTestClient(String regex) { this.pattern = Pattern.compile(regex); } @Override public Mono<HttpResponse> send(HttpRequest request) { if (CoreUtils.isNullOrEmpty(request.getHeaders().getValue(HttpHeaderName.USER_AGENT))) { throw new RuntimeException("Failed to set 'User-Agent' header."); } Matcher matcher = pattern.matcher(request.getHeaders().getValue(HttpHeaderName.USER_AGENT)); assertTrue(matcher.matches()); return Mono.just(new MockHttpResponse(request, 200)); } } private static final class FreshDateTestClient implements HttpClient { private String firstDate; @Override public Mono<HttpResponse> send(HttpRequest request) { if (firstDate == null) { firstDate = request.getHeaders().getValue(HttpHeaderName.DATE); return Mono.error(new IOException("IOException!")); } assertNotEquals(firstDate, request.getHeaders().getValue(HttpHeaderName.DATE)); return Mono.just(new MockHttpResponse(request, 200)); } } private static final class ApplicationIdUAStringTestClient implements HttpClient { private final String expectedUA; ApplicationIdUAStringTestClient(String expectedUA) { this.expectedUA = expectedUA; } @Override public Mono<HttpResponse> send(HttpRequest request) { assertTrue(request.getHeaders().getValue(HttpHeaderName.USER_AGENT).startsWith(expectedUA)); return Mono.just(new MockHttpResponse(request, 200)); } } private static final class 
ClientOptionsHeadersTestClient implements HttpClient { private final Map<HttpHeaderName, String> headers; ClientOptionsHeadersTestClient(Map<HttpHeaderName, String> headers) { this.headers = headers; } @Override public Mono<HttpResponse> send(HttpRequest request) { headers.forEach((name, value) -> { if (CoreUtils.isNullOrEmpty(request.getHeaders().getValue(name))) { throw new RuntimeException("Failed to set custom header " + name); } if (name == HttpHeaderName.AUTHORIZATION) { if (Objects.equals(value, request.getHeaders().getValue(HttpHeaderName.AUTHORIZATION))) { throw new RuntimeException("Custom header " + name + " did not match expectation."); } } else { if (!Objects.equals(value, request.getHeaders().getValue(name))) { throw new RuntimeException("Custom header " + name + " did not match expectation."); } } }); return Mono.just(new MockHttpResponse(request, 200)); } } }
Would it be possible to create another version of this test where we test it from the sync interface? I think as we do the migration it would be nice to have both versions of it.
public void uploadAndDownloadDifferentRegionLength(int regionLength, int dataSize) { ByteBuffer data = getRandomData(dataSize); beac = mockAesKey(getEncryptedClientBuilder(fakeKey, null, ENV.getPrimaryAccount().getCredential(), cc.getBlobContainerUrl(), EncryptionVersion.V2) .blobName(generateBlobName()) .blobEncryptionOptions(new BlobEncryptionOptions().setAuthenticatedRegionDataLength(regionLength)) .buildEncryptedBlobAsyncClient()); beac.uploadWithResponse(new BlobParallelUploadOptions(Flux.just(data.duplicate()))).block(); ByteArrayOutputStream plaintextOut = new ByteArrayOutputStream(); new EncryptedBlobClient(beac).downloadStream(plaintextOut); assertArraysEqual(data.array(), plaintextOut.toByteArray()); }
.buildEncryptedBlobAsyncClient());
public void uploadAndDownloadDifferentRegionLength(int regionLength, int dataSize) { ByteBuffer data = getRandomData(dataSize); ebc = new EncryptedBlobClient(mockAesKey(getEncryptedClientBuilder(fakeKey, null, ENV.getPrimaryAccount().getCredential(), cc.getBlobContainerUrl(), EncryptionVersion.V2) .blobName(generateBlobName()) .clientSideEncryptionOptions(new BlobClientSideEncryptionOptions().setAuthenticatedRegionDataLengthInBytes(regionLength)) .buildEncryptedBlobAsyncClient())); ebc.uploadWithResponse(new BlobParallelUploadOptions(BinaryData.fromByteBuffer(data.duplicate())), null, null); ByteArrayOutputStream plaintextOut = new ByteArrayOutputStream(); ebc.downloadStream(plaintextOut); assertArraysEqual(data.array(), plaintextOut.toByteArray()); }
class NoOpKey implements AsyncKeyEncryptionKey { @Override public Mono<String> getKeyId() { return Mono.just("local:key1"); } @Override public Mono<byte[]> wrapKey(String algorithm, byte[] key) { if (!"None".equals(algorithm)) { throw new IllegalArgumentException(); } return Mono.just(key); } @Override public Mono<byte[]> unwrapKey(String algorithm, byte[] encryptedKey) { if (!"None".equals(algorithm)) { throw new IllegalArgumentException(); } return Mono.just(encryptedKey); } }
class NoOpKey implements AsyncKeyEncryptionKey { @Override public Mono<String> getKeyId() { return Mono.just("local:key1"); } @Override public Mono<byte[]> wrapKey(String algorithm, byte[] key) { if (!"None".equals(algorithm)) { throw new IllegalArgumentException(); } return Mono.just(key); } @Override public Mono<byte[]> unwrapKey(String algorithm, byte[] encryptedKey) { if (!"None".equals(algorithm)) { throw new IllegalArgumentException(); } return Mono.just(encryptedKey); } }
It would also be good to see if we have test cases for the `downloadToFile` method since it has some extra logic on the `downloadStream` that exercise the ranged header expansion that the `downloadStream` method won't use.
public void uploadAndDownloadDifferentRegionLength(int regionLength, int dataSize) { ByteBuffer data = getRandomData(dataSize); beac = mockAesKey(getEncryptedClientBuilder(fakeKey, null, ENV.getPrimaryAccount().getCredential(), cc.getBlobContainerUrl(), EncryptionVersion.V2) .blobName(generateBlobName()) .blobEncryptionOptions(new BlobEncryptionOptions().setAuthenticatedRegionDataLength(regionLength)) .buildEncryptedBlobAsyncClient()); beac.uploadWithResponse(new BlobParallelUploadOptions(Flux.just(data.duplicate()))).block(); ByteArrayOutputStream plaintextOut = new ByteArrayOutputStream(); new EncryptedBlobClient(beac).downloadStream(plaintextOut); assertArraysEqual(data.array(), plaintextOut.toByteArray()); }
new EncryptedBlobClient(beac).downloadStream(plaintextOut);
public void uploadAndDownloadDifferentRegionLength(int regionLength, int dataSize) { ByteBuffer data = getRandomData(dataSize); ebc = new EncryptedBlobClient(mockAesKey(getEncryptedClientBuilder(fakeKey, null, ENV.getPrimaryAccount().getCredential(), cc.getBlobContainerUrl(), EncryptionVersion.V2) .blobName(generateBlobName()) .clientSideEncryptionOptions(new BlobClientSideEncryptionOptions().setAuthenticatedRegionDataLengthInBytes(regionLength)) .buildEncryptedBlobAsyncClient())); ebc.uploadWithResponse(new BlobParallelUploadOptions(BinaryData.fromByteBuffer(data.duplicate())), null, null); ByteArrayOutputStream plaintextOut = new ByteArrayOutputStream(); ebc.downloadStream(plaintextOut); assertArraysEqual(data.array(), plaintextOut.toByteArray()); }
class NoOpKey implements AsyncKeyEncryptionKey { @Override public Mono<String> getKeyId() { return Mono.just("local:key1"); } @Override public Mono<byte[]> wrapKey(String algorithm, byte[] key) { if (!"None".equals(algorithm)) { throw new IllegalArgumentException(); } return Mono.just(key); } @Override public Mono<byte[]> unwrapKey(String algorithm, byte[] encryptedKey) { if (!"None".equals(algorithm)) { throw new IllegalArgumentException(); } return Mono.just(encryptedKey); } }
class NoOpKey implements AsyncKeyEncryptionKey { @Override public Mono<String> getKeyId() { return Mono.just("local:key1"); } @Override public Mono<byte[]> wrapKey(String algorithm, byte[] key) { if (!"None".equals(algorithm)) { throw new IllegalArgumentException(); } return Mono.just(key); } @Override public Mono<byte[]> unwrapKey(String algorithm, byte[] encryptedKey) { if (!"None".equals(algorithm)) { throw new IllegalArgumentException(); } return Mono.just(encryptedKey); } }
We can now remove this entire block comment now that we are handling this :)
public Flux<BufferAggregator> write(ByteBuffer buf) { if (this.currentBuf == null) { this.currentBuf = new BufferAggregator(this.buffSize); } Flux<BufferAggregator> result; if (this.currentBuf.remainingCapacity() >= buf.remaining()) { this.currentBuf.append(buf); if (this.currentBuf.remainingCapacity() == 0) { result = Flux.just(this.currentBuf); this.currentBuf = null; } else { /* We are still filling the current buffer, so we have no data to return. We will return the buffer once it is filled */ result = Flux.empty(); } } else { ByteBuffer duplicate = buf.duplicate(); int newLimit = buf.position() + (int) this.currentBuf.remainingCapacity(); duplicate.limit(newLimit); this.currentBuf.append(duplicate); buf.position(newLimit); result = Flux.just(this.currentBuf); int remainingChunks = buf.remaining() / (int) this.buffSize; if (remainingChunks >= 1) { BufferAggregator[] aggregators = new BufferAggregator[remainingChunks]; for (int i = 0; i < remainingChunks; i++) { BufferAggregator overflowBuffer = new BufferAggregator(this.buffSize); ByteBuffer overflowDup = buf.duplicate(); int overflowLimit = buf.position() + (int) overflowBuffer.remainingCapacity(); overflowDup.limit(overflowLimit); overflowBuffer.append(overflowDup); buf.position(overflowLimit); aggregators[i] = overflowBuffer; } result = result.concatWith(Flux.fromArray(aggregators)); } /* Get a new buffer and fill it with whatever is left from buf. Note that this relies on the assumption that the source Flux has been split up into buffers that are no bigger than chunk size. This assumption means we'll only have to over flow once, and the buffer we overflow into will not be filled. This is the buffer we will write to on the next call to write(). */ if (buf.remaining() > 0) { this.currentBuf = new BufferAggregator(this.buffSize); this.currentBuf.append(buf); } else { this.currentBuf = null; } } return result; }
Get a new buffer and fill it with whatever is left from buf. Note that this relies on the assumption that
public Flux<BufferAggregator> write(ByteBuffer buf) { if (this.currentBuf == null) { this.currentBuf = new BufferAggregator(this.buffSize); } Flux<BufferAggregator> result; if (this.currentBuf.remainingCapacity() >= buf.remaining()) { this.currentBuf.append(buf); if (this.currentBuf.remainingCapacity() == 0) { result = Flux.just(this.currentBuf); this.currentBuf = null; } else { /* We are still filling the current buffer, so we have no data to return. We will return the buffer once it is filled */ result = Flux.empty(); } } else { ByteBuffer duplicate = buf.duplicate(); int newLimit = buf.position() + (int) this.currentBuf.remainingCapacity(); duplicate.limit(newLimit); this.currentBuf.append(duplicate); buf.position(newLimit); result = Flux.just(this.currentBuf); int remainingChunks = buf.remaining() / (int) this.buffSize; if (remainingChunks >= 1) { BufferAggregator[] aggregators = new BufferAggregator[remainingChunks]; for (int i = 0; i < remainingChunks; i++) { BufferAggregator aggregator = new BufferAggregator(this.buffSize); ByteBuffer overflowDup = buf.duplicate(); int overflowLimit = buf.position() + (int) this.buffSize; overflowDup.limit(overflowLimit); aggregator.append(overflowDup); buf.position(overflowLimit); aggregators[i] = aggregator; } result = result.concatWith(Flux.fromArray(aggregators)); } if (buf.remaining() > 0) { this.currentBuf = new BufferAggregator(this.buffSize); this.currentBuf.append(buf); } else { this.currentBuf = null; } } return result; }
class BufferStagingArea { private final long buffSize; private BufferAggregator currentBuf; /** * Creates a new instance of UploadBufferPool * @param buffSize The size of the buffers * @param maxBuffSize The max size of the buffers */ public BufferStagingArea(final long buffSize, long maxBuffSize) { StorageImplUtils.assertInBounds("buffSize", buffSize, 1, maxBuffSize); this.buffSize = buffSize; } /* Note that the upload method will be calling write sequentially as there is only one worker reading from the source and calling write. This means operations like currentBuf.remaining() will not result in race conditions. */ /** * Writes ByteBuffers to a {@code Flux<BufferAggregator>} * @param buf The buffer to write * @return The {@code Flux<BufferAggregator>} */ /** * Flushes the current buffer * @return the flushed buffer */ public Flux<BufferAggregator> flush() { /* Prep and return any data left in the pool. It is important to set the limit so that we don't read beyond the actual data as this buffer may have been used before and therefore may have some garbage at the end. */ if (this.currentBuf != null) { BufferAggregator last = this.currentBuf; this.currentBuf = null; return Flux.just(last); } return Flux.empty(); } }
class BufferStagingArea { private final long buffSize; private BufferAggregator currentBuf; /** * Creates a new instance of UploadBufferPool * @param buffSize The size of the buffers * @param maxBuffSize The max size of the buffers */ public BufferStagingArea(final long buffSize, long maxBuffSize) { StorageImplUtils.assertInBounds("buffSize", buffSize, 1, maxBuffSize); this.buffSize = buffSize; } /* Note that the upload method will be calling write sequentially as there is only one worker reading from the source and calling write. This means operations like currentBuf.remaining() will not result in race conditions. */ /** * Writes ByteBuffers to a {@code Flux<BufferAggregator>} * @param buf The buffer to write * @return The {@code Flux<BufferAggregator>} */ /** * Flushes the current buffer * @return the flushed buffer */ public Flux<BufferAggregator> flush() { /* Prep and return any data left in the pool. It is important to set the limit so that we don't read beyond the actual data as this buffer may have been used before and therefore may have some garbage at the end. */ if (this.currentBuf != null) { BufferAggregator last = this.currentBuf; this.currentBuf = null; return Flux.just(last); } return Flux.empty(); } }
We should probably add a comment to overall note that this logic is to handle when the buffer provided is larger than the remaining capacity + the fixed sized of a new BufferAggregator. We can even probably use some of the wording in explaining it from the comment that I mention we should remove later on how we don't handle this sort of overflow.
public Flux<BufferAggregator> write(ByteBuffer buf) { if (this.currentBuf == null) { this.currentBuf = new BufferAggregator(this.buffSize); } Flux<BufferAggregator> result; if (this.currentBuf.remainingCapacity() >= buf.remaining()) { this.currentBuf.append(buf); if (this.currentBuf.remainingCapacity() == 0) { result = Flux.just(this.currentBuf); this.currentBuf = null; } else { /* We are still filling the current buffer, so we have no data to return. We will return the buffer once it is filled */ result = Flux.empty(); } } else { ByteBuffer duplicate = buf.duplicate(); int newLimit = buf.position() + (int) this.currentBuf.remainingCapacity(); duplicate.limit(newLimit); this.currentBuf.append(duplicate); buf.position(newLimit); result = Flux.just(this.currentBuf); int remainingChunks = buf.remaining() / (int) this.buffSize; if (remainingChunks >= 1) { BufferAggregator[] aggregators = new BufferAggregator[remainingChunks]; for (int i = 0; i < remainingChunks; i++) { BufferAggregator overflowBuffer = new BufferAggregator(this.buffSize); ByteBuffer overflowDup = buf.duplicate(); int overflowLimit = buf.position() + (int) overflowBuffer.remainingCapacity(); overflowDup.limit(overflowLimit); overflowBuffer.append(overflowDup); buf.position(overflowLimit); aggregators[i] = overflowBuffer; } result = result.concatWith(Flux.fromArray(aggregators)); } /* Get a new buffer and fill it with whatever is left from buf. Note that this relies on the assumption that the source Flux has been split up into buffers that are no bigger than chunk size. This assumption means we'll only have to over flow once, and the buffer we overflow into will not be filled. This is the buffer we will write to on the next call to write(). */ if (buf.remaining() > 0) { this.currentBuf = new BufferAggregator(this.buffSize); this.currentBuf.append(buf); } else { this.currentBuf = null; } } return result; }
public Flux<BufferAggregator> write(ByteBuffer buf) { if (this.currentBuf == null) { this.currentBuf = new BufferAggregator(this.buffSize); } Flux<BufferAggregator> result; if (this.currentBuf.remainingCapacity() >= buf.remaining()) { this.currentBuf.append(buf); if (this.currentBuf.remainingCapacity() == 0) { result = Flux.just(this.currentBuf); this.currentBuf = null; } else { /* We are still filling the current buffer, so we have no data to return. We will return the buffer once it is filled */ result = Flux.empty(); } } else { ByteBuffer duplicate = buf.duplicate(); int newLimit = buf.position() + (int) this.currentBuf.remainingCapacity(); duplicate.limit(newLimit); this.currentBuf.append(duplicate); buf.position(newLimit); result = Flux.just(this.currentBuf); int remainingChunks = buf.remaining() / (int) this.buffSize; if (remainingChunks >= 1) { BufferAggregator[] aggregators = new BufferAggregator[remainingChunks]; for (int i = 0; i < remainingChunks; i++) { BufferAggregator aggregator = new BufferAggregator(this.buffSize); ByteBuffer overflowDup = buf.duplicate(); int overflowLimit = buf.position() + (int) this.buffSize; overflowDup.limit(overflowLimit); aggregator.append(overflowDup); buf.position(overflowLimit); aggregators[i] = aggregator; } result = result.concatWith(Flux.fromArray(aggregators)); } if (buf.remaining() > 0) { this.currentBuf = new BufferAggregator(this.buffSize); this.currentBuf.append(buf); } else { this.currentBuf = null; } } return result; }
class BufferStagingArea { private final long buffSize; private BufferAggregator currentBuf; /** * Creates a new instance of UploadBufferPool * @param buffSize The size of the buffers * @param maxBuffSize The max size of the buffers */ public BufferStagingArea(final long buffSize, long maxBuffSize) { StorageImplUtils.assertInBounds("buffSize", buffSize, 1, maxBuffSize); this.buffSize = buffSize; } /* Note that the upload method will be calling write sequentially as there is only one worker reading from the source and calling write. This means operations like currentBuf.remaining() will not result in race conditions. */ /** * Writes ByteBuffers to a {@code Flux<BufferAggregator>} * @param buf The buffer to write * @return The {@code Flux<BufferAggregator>} */ /** * Flushes the current buffer * @return the flushed buffer */ public Flux<BufferAggregator> flush() { /* Prep and return any data left in the pool. It is important to set the limit so that we don't read beyond the actual data as this buffer may have been used before and therefore may have some garbage at the end. */ if (this.currentBuf != null) { BufferAggregator last = this.currentBuf; this.currentBuf = null; return Flux.just(last); } return Flux.empty(); } }
class BufferStagingArea { private final long buffSize; private BufferAggregator currentBuf; /** * Creates a new instance of UploadBufferPool * @param buffSize The size of the buffers * @param maxBuffSize The max size of the buffers */ public BufferStagingArea(final long buffSize, long maxBuffSize) { StorageImplUtils.assertInBounds("buffSize", buffSize, 1, maxBuffSize); this.buffSize = buffSize; } /* Note that the upload method will be calling write sequentially as there is only one worker reading from the source and calling write. This means operations like currentBuf.remaining() will not result in race conditions. */ /** * Writes ByteBuffers to a {@code Flux<BufferAggregator>} * @param buf The buffer to write * @return The {@code Flux<BufferAggregator>} */ /** * Flushes the current buffer * @return the flushed buffer */ public Flux<BufferAggregator> flush() { /* Prep and return any data left in the pool. It is important to set the limit so that we don't read beyond the actual data as this buffer may have been used before and therefore may have some garbage at the end. */ if (this.currentBuf != null) { BufferAggregator last = this.currentBuf; this.currentBuf = null; return Flux.just(last); } return Flux.empty(); } }
Let's also add an assertion that length of each of these byte buffers is equal to the `stagingSize`. This is an important property that needs to hold for encryption/decryption to work.
public void bufferStagingAreaWithSmallerBuffer(int stagingSize, int dataSize) { byte[] bytes = new byte[dataSize]; ThreadLocalRandom.current().nextBytes(bytes); ByteBuffer buffer = ByteBuffer.wrap(bytes); Flux<ByteBuffer> byteBufferFlux = Flux.just(buffer); BufferStagingArea stagingArea = new BufferStagingArea(stagingSize, stagingSize); List<ByteBuffer> collectedBuffers = byteBufferFlux.flatMapSequential(stagingArea::write, 1) .concatWith(Flux.defer(stagingArea::flush)) .flatMap(BufferAggregator::asFlux) .collectList().block(); assertNotNull(collectedBuffers); ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); collectedBuffers.forEach(bb -> { byte[] array = new byte[bb.remaining()]; bb.get(array); try { outputStream.write(array); } catch (IOException e) { throw new RuntimeException(e); } }); byte[] reconstructedData = outputStream.toByteArray(); assertArrayEquals(bytes, reconstructedData); }
outputStream.write(array);
public void bufferStagingAreaWithSmallerBuffer(int stagingSize, int dataSize) { byte[] bytes = new byte[dataSize]; ThreadLocalRandom.current().nextBytes(bytes); ByteBuffer buffer = ByteBuffer.wrap(bytes); Flux<ByteBuffer> byteBufferFlux = Flux.just(buffer); BufferStagingArea stagingArea = new BufferStagingArea(stagingSize, stagingSize); List<ByteBuffer> collectedBuffers = byteBufferFlux.flatMapSequential(stagingArea::write, 1) .concatWith(Flux.defer(stagingArea::flush)) .flatMap(BufferAggregator::asFlux) .collectList().block(); assertNotNull(collectedBuffers); assertFalse(collectedBuffers.isEmpty()); ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); int sizeRemaining = dataSize % stagingSize; for (int i = 0; i < collectedBuffers.size() - 1; i++) { ByteBuffer bb = collectedBuffers.get(i); byte[] array = new byte[bb.remaining()]; assertEquals(stagingSize, array.length); bb.get(array); try { outputStream.write(array); } catch (IOException e) { throw new RuntimeException(e); } } ByteBuffer lastBuffer = collectedBuffers.get(collectedBuffers.size() - 1); byte[] lastArray = new byte[lastBuffer.remaining()]; lastBuffer.get(lastArray); if (sizeRemaining != 0) { assertEquals(sizeRemaining, lastArray.length, "The last buffer's size should match the remaining size."); } else { assertEquals(stagingSize, lastArray.length, "The last buffer should match the staging size if no remainder."); } try { outputStream.write(lastArray); } catch (IOException e) { throw new RuntimeException(e); } byte[] reconstructedData = outputStream.toByteArray(); assertArrayEquals(bytes, reconstructedData); }
class BufferStagingAreaTests { static Tuple2<ByteBuffer[], ByteBuffer[]> generateData(int numBuffs, int minBuffSize, int maxBuffSize) { int totalSize = 0; int[] sizes = new int[numBuffs]; for (int i = 0; i < numBuffs; i++) { int size = minBuffSize; if (maxBuffSize != minBuffSize) { size += ThreadLocalRandom.current().nextInt(maxBuffSize - minBuffSize); } sizes[i] = size; totalSize += size; } byte[] bytes = new byte[totalSize]; ThreadLocalRandom.current().nextBytes(bytes); ByteBuffer[] data = new ByteBuffer[numBuffs]; int begin = 0; for (int i = 0; i < numBuffs; i++) { int end = begin + sizes[i]; data[i] = ByteBuffer.wrap(Arrays.copyOfRange(bytes, begin, end)); begin += sizes[i]; } int expectedNumBuffs = (int) Math.ceil((double) totalSize / maxBuffSize); ByteBuffer[] expectedData = new ByteBuffer[expectedNumBuffs]; for (int i = 0; i < expectedNumBuffs; i++) { begin = i * maxBuffSize; int end = Math.min((i + 1) * maxBuffSize, totalSize); expectedData[i] = ByteBuffer.wrap(Arrays.copyOfRange(bytes, begin, end)); } return Tuples.of(data, expectedData); } static byte[] byteBufferListToByteArray(List<ByteBuffer> buffers) { int totalSize = 0; for (ByteBuffer b: buffers) { totalSize += b.remaining(); } byte[] bytes = new byte[totalSize]; int begin = 0; for (ByteBuffer b: buffers) { System.arraycopy(b.array(), b.position(), bytes, begin, b.remaining()); begin += b.remaining(); } return bytes; } @ParameterizedTest @MethodSource("bufferStagingAreaSupplier") public void bufferStagingArea(int numBuffs, int minBuffSize, int maxBuffSize) { BufferStagingArea stagingArea = new BufferStagingArea(maxBuffSize, maxBuffSize); Tuple2<ByteBuffer[], ByteBuffer[]> generatedData = generateData(numBuffs, minBuffSize, maxBuffSize); Flux<ByteBuffer> data = Flux.fromArray(generatedData.getT1()); ByteBuffer[] expectedData = generatedData.getT2(); List<List<ByteBuffer>> recoveredData = data.flatMapSequential(stagingArea::write, 1) .concatWith(Flux.defer(stagingArea::flush)).flatMap(aggregator -> 
aggregator.asFlux().collectList()) .collectList().block(); assertEquals(expectedData.length, recoveredData.size()); for (int i = 0; i < expectedData.length; i++) { TestUtils.assertArraysEqual(expectedData[i].array(), byteBufferListToByteArray(recoveredData.get(i))); } } private static Stream<Arguments> bufferStagingAreaSupplier() { return Stream.of( Arguments.of(10, 1000, 1000), Arguments.of(100, 1000, 1000), Arguments.of(1000, 1000, 1000), Arguments.of(10000, 1000, 1000), Arguments.of(10000, 1, 1000), Arguments.of(100, 1, Constants.MB * 4), Arguments.of(100, Constants.MB * 4, Constants.MB * 8) ); } @ParameterizedTest @MethodSource("bufferStagingAreaWithSmallerBufferSupplier") private static Stream<Arguments> bufferStagingAreaWithSmallerBufferSupplier() { return Stream.of( Arguments.of(4 * Constants.KB, 4 * Constants.MB), Arguments.of(Constants.KB, 4 * Constants.MB), Arguments.of(25, Constants.KB), Arguments.of(2 * Constants.KB, 4 * Constants.MB), Arguments.of(10 * Constants.KB, 4 * Constants.MB) ); } }
class BufferStagingAreaTests { static Tuple2<ByteBuffer[], ByteBuffer[]> generateData(int numBuffs, int minBuffSize, int maxBuffSize) { int totalSize = 0; int[] sizes = new int[numBuffs]; for (int i = 0; i < numBuffs; i++) { int size = minBuffSize; if (maxBuffSize != minBuffSize) { size += ThreadLocalRandom.current().nextInt(maxBuffSize - minBuffSize); } sizes[i] = size; totalSize += size; } byte[] bytes = new byte[totalSize]; ThreadLocalRandom.current().nextBytes(bytes); ByteBuffer[] data = new ByteBuffer[numBuffs]; int begin = 0; for (int i = 0; i < numBuffs; i++) { int end = begin + sizes[i]; data[i] = ByteBuffer.wrap(Arrays.copyOfRange(bytes, begin, end)); begin += sizes[i]; } int expectedNumBuffs = (int) Math.ceil((double) totalSize / maxBuffSize); ByteBuffer[] expectedData = new ByteBuffer[expectedNumBuffs]; for (int i = 0; i < expectedNumBuffs; i++) { begin = i * maxBuffSize; int end = Math.min((i + 1) * maxBuffSize, totalSize); expectedData[i] = ByteBuffer.wrap(Arrays.copyOfRange(bytes, begin, end)); } return Tuples.of(data, expectedData); } static byte[] byteBufferListToByteArray(List<ByteBuffer> buffers) { int totalSize = 0; for (ByteBuffer b: buffers) { totalSize += b.remaining(); } byte[] bytes = new byte[totalSize]; int begin = 0; for (ByteBuffer b: buffers) { System.arraycopy(b.array(), b.position(), bytes, begin, b.remaining()); begin += b.remaining(); } return bytes; } @ParameterizedTest @MethodSource("bufferStagingAreaSupplier") public void bufferStagingArea(int numBuffs, int minBuffSize, int maxBuffSize) { BufferStagingArea stagingArea = new BufferStagingArea(maxBuffSize, maxBuffSize); Tuple2<ByteBuffer[], ByteBuffer[]> generatedData = generateData(numBuffs, minBuffSize, maxBuffSize); Flux<ByteBuffer> data = Flux.fromArray(generatedData.getT1()); ByteBuffer[] expectedData = generatedData.getT2(); List<List<ByteBuffer>> recoveredData = data.flatMapSequential(stagingArea::write, 1) .concatWith(Flux.defer(stagingArea::flush)).flatMap(aggregator -> 
aggregator.asFlux().collectList()) .collectList().block(); assertEquals(expectedData.length, recoveredData.size()); for (int i = 0; i < expectedData.length; i++) { TestUtils.assertArraysEqual(expectedData[i].array(), byteBufferListToByteArray(recoveredData.get(i))); } } private static Stream<Arguments> bufferStagingAreaSupplier() { return Stream.of( Arguments.of(10, 1000, 1000), Arguments.of(100, 1000, 1000), Arguments.of(1000, 1000, 1000), Arguments.of(10000, 1000, 1000), Arguments.of(10000, 1, 1000), Arguments.of(100, 1, Constants.MB * 4), Arguments.of(100, Constants.MB * 4, Constants.MB * 8) ); } @ParameterizedTest @MethodSource("bufferStagingAreaWithSmallerBufferSupplier") private static Stream<Arguments> bufferStagingAreaWithSmallerBufferSupplier() { return Stream.of( Arguments.of(4 * Constants.KB, 4 * Constants.MB), Arguments.of(Constants.KB, 4 * Constants.MB), Arguments.of(25, Constants.KB), Arguments.of(2 * Constants.KB, 4 * Constants.MB), Arguments.of(10 * Constants.KB, 4 * Constants.MB) ); } }
Revisiting this when decoupling sync and async client tests
public void uploadAndDownloadDifferentRegionLength(int regionLength, int dataSize) { ByteBuffer data = getRandomData(dataSize); beac = mockAesKey(getEncryptedClientBuilder(fakeKey, null, ENV.getPrimaryAccount().getCredential(), cc.getBlobContainerUrl(), EncryptionVersion.V2) .blobName(generateBlobName()) .blobEncryptionOptions(new BlobEncryptionOptions().setAuthenticatedRegionDataLength(regionLength)) .buildEncryptedBlobAsyncClient()); beac.uploadWithResponse(new BlobParallelUploadOptions(Flux.just(data.duplicate()))).block(); ByteArrayOutputStream plaintextOut = new ByteArrayOutputStream(); new EncryptedBlobClient(beac).downloadStream(plaintextOut); assertArraysEqual(data.array(), plaintextOut.toByteArray()); }
.buildEncryptedBlobAsyncClient());
public void uploadAndDownloadDifferentRegionLength(int regionLength, int dataSize) { ByteBuffer data = getRandomData(dataSize); ebc = new EncryptedBlobClient(mockAesKey(getEncryptedClientBuilder(fakeKey, null, ENV.getPrimaryAccount().getCredential(), cc.getBlobContainerUrl(), EncryptionVersion.V2) .blobName(generateBlobName()) .clientSideEncryptionOptions(new BlobClientSideEncryptionOptions().setAuthenticatedRegionDataLengthInBytes(regionLength)) .buildEncryptedBlobAsyncClient())); ebc.uploadWithResponse(new BlobParallelUploadOptions(BinaryData.fromByteBuffer(data.duplicate())), null, null); ByteArrayOutputStream plaintextOut = new ByteArrayOutputStream(); ebc.downloadStream(plaintextOut); assertArraysEqual(data.array(), plaintextOut.toByteArray()); }
class NoOpKey implements AsyncKeyEncryptionKey { @Override public Mono<String> getKeyId() { return Mono.just("local:key1"); } @Override public Mono<byte[]> wrapKey(String algorithm, byte[] key) { if (!"None".equals(algorithm)) { throw new IllegalArgumentException(); } return Mono.just(key); } @Override public Mono<byte[]> unwrapKey(String algorithm, byte[] encryptedKey) { if (!"None".equals(algorithm)) { throw new IllegalArgumentException(); } return Mono.just(encryptedKey); } }
class NoOpKey implements AsyncKeyEncryptionKey { @Override public Mono<String> getKeyId() { return Mono.just("local:key1"); } @Override public Mono<byte[]> wrapKey(String algorithm, byte[] key) { if (!"None".equals(algorithm)) { throw new IllegalArgumentException(); } return Mono.just(key); } @Override public Mono<byte[]> unwrapKey(String algorithm, byte[] encryptedKey) { if (!"None".equals(algorithm)) { throw new IllegalArgumentException(); } return Mono.just(encryptedKey); } }
So for the V1 encryption data, we should not be setting the encryped region info. It's only ever present for v2. So let's remove it here.
public void testFromBlobRangeHeader(Integer offset, Integer count) {
    // Encrypted region info is a V2-only concept: V1 encryption data never
    // carries it, so it must not be set on the V1 fixture.
    EncryptionData encryptionDataV1 = new EncryptionData()
        .setEncryptionAgent(new EncryptionAgent().setProtocol(ENCRYPTION_PROTOCOL_V1));
    EncryptionData encryptionDataV2 = new EncryptionData()
        .setEncryptionAgent(new EncryptionAgent().setProtocol(ENCRYPTION_PROTOCOL_V2))
        .setEncryptedRegionInfo(new EncryptedRegionInfo(GCM_ENCRYPTION_REGION_LENGTH, NONCE_LENGTH));

    BlobRange range = getBlobRange(offset, count);

    // Build each encrypted range twice — once directly from the BlobRange and
    // once parsed back from its header string — for both protocol versions.
    EncryptedBlobRange encryptedRangeFromBlobRangeV1 = new EncryptedBlobRange(range, encryptionDataV1);
    EncryptedBlobRange encryptedRangeFromHeaderV1 =
        EncryptedBlobRange.getEncryptedBlobRangeFromHeader(range.toHeaderValue(), encryptionDataV1);
    EncryptedBlobRange encryptedRangeFromBlobRangeV2 = new EncryptedBlobRange(range, encryptionDataV2);
    EncryptedBlobRange encryptedRangeFromHeaderV2 =
        EncryptedBlobRange.getEncryptedBlobRangeFromHeader(range.toHeaderValue(), encryptionDataV2);

    // Both construction paths must agree on the adjusted range.
    assertEquals(encryptedRangeFromHeaderV1.toBlobRange().toHeaderValue(),
        encryptedRangeFromBlobRangeV1.toBlobRange().toHeaderValue());
    assertEquals(encryptedRangeFromHeaderV2.toBlobRange().toHeaderValue(),
        encryptedRangeFromBlobRangeV2.toBlobRange().toHeaderValue());
}
.setEncryptedRegionInfo(new EncryptedRegionInfo(GCM_ENCRYPTION_REGION_LENGTH, NONCE_LENGTH));
public void testFromBlobRangeHeader(Integer offset, Integer count) {
    // Region info is only populated for V2; V1 encryption data omits it.
    EncryptionData v1Data = new EncryptionData()
        .setEncryptionAgent(new EncryptionAgent().setProtocol(ENCRYPTION_PROTOCOL_V1));
    EncryptionData v2Data = new EncryptionData()
        .setEncryptionAgent(new EncryptionAgent().setProtocol(ENCRYPTION_PROTOCOL_V2))
        .setEncryptedRegionInfo(new EncryptedRegionInfo(GCM_ENCRYPTION_REGION_LENGTH, NONCE_LENGTH));

    BlobRange range = getBlobRange(offset, count);
    String rangeHeader = range.toHeaderValue();

    // Construct each encrypted range twice: from the BlobRange directly and
    // parsed back from the header string. The two paths must agree.
    EncryptedBlobRange v1FromRange = new EncryptedBlobRange(range, v1Data);
    EncryptedBlobRange v1FromHeader =
        EncryptedBlobRange.getEncryptedBlobRangeFromHeader(rangeHeader, v1Data);
    EncryptedBlobRange v2FromRange = new EncryptedBlobRange(range, v2Data);
    EncryptedBlobRange v2FromHeader =
        EncryptedBlobRange.getEncryptedBlobRangeFromHeader(rangeHeader, v2Data);

    assertEquals(v1FromHeader.toBlobRange().toHeaderValue(),
        v1FromRange.toBlobRange().toHeaderValue());
    assertEquals(v2FromHeader.toBlobRange().toHeaderValue(),
        v2FromRange.toBlobRange().toHeaderValue());
}
class EncryptedBlobRangeTests extends BlobCryptographyTestBase { @ParameterizedTest @CsvSource(value = {",,bytes=0-", "3,,bytes=0-", "17,,bytes=0-", "34,,bytes=16-", "47,,bytes=16-", "48,,bytes=32-", "2,6,bytes=0-15", "18,2,bytes=0-31", "38,17,bytes=16-63"}) public void testConstructor(Integer offset, Integer count, String expectedString) { EncryptedBlobRange ebr = new EncryptedBlobRange(getBlobRange(offset, count), new EncryptionData().setEncryptionAgent(new EncryptionAgent().setProtocol(ENCRYPTION_PROTOCOL_V1))); assertEquals(expectedString, ebr.toBlobRange().toString()); } @ParameterizedTest @MethodSource("constructorV2Supplier") public void constructorV2(Integer offset, Integer count, String expectedString) { EncryptedBlobRange ebr = new EncryptedBlobRange(getBlobRange(offset, count), new EncryptionData() .setEncryptionAgent(new EncryptionAgent().setProtocol(ENCRYPTION_PROTOCOL_V2)) .setEncryptedRegionInfo(new EncryptedRegionInfo(GCM_ENCRYPTION_REGION_LENGTH, NONCE_LENGTH))); assertEquals(expectedString, ebr.toBlobRange().toString()); } private static Stream<Arguments> constructorV2Supplier() { return Stream.of( Arguments.of(null, null, "bytes=0-"), Arguments.arguments(3, null, "bytes=0-"), Arguments.of(GCM_ENCRYPTION_REGION_LENGTH - 1, null, "bytes=0-"), Arguments.of(GCM_ENCRYPTION_REGION_LENGTH + 1024, null, "bytes=4194332-"), Arguments.of(2 * GCM_ENCRYPTION_REGION_LENGTH, GCM_ENCRYPTION_REGION_LENGTH, "bytes=8388664-12582995"), Arguments.of(2, 6, "bytes=0-4194331"), Arguments.of(5000000, 5000000, "bytes=4194332-12582995"), Arguments.of(5000000, 20165000, "bytes=4194332-25165991") ); } @ParameterizedTest @CsvSource(value = {",", "3,", "17,", "34,", "47,", "48,", "2,6", "18,2", "38,17"}) private static BlobRange getBlobRange(Integer offset, Integer count) { if (offset == null && count == null) { return new BlobRange(0); } else if (count == null) { return new BlobRange(offset); } else { return new BlobRange(offset, (long) count); } } @ParameterizedTest 
@ValueSource(ints = { 16, 4 * Constants.KB, 4 * Constants.MB, Constants.GB }) public void encryptedBlobRangeFromEncryptionData(int regionLength) { long dataSize = 4 * Constants.MB; EncryptionData encryptionData = new EncryptionData() .setEncryptionAgent(new EncryptionAgent(ENCRYPTION_PROTOCOL_V2, EncryptionAlgorithm.AES_GCM_256)) .setEncryptedRegionInfo(new EncryptedRegionInfo(regionLength, NONCE_LENGTH)); EncryptedBlobRange encryptedBlobRange = new EncryptedBlobRange(new BlobRange(0, dataSize), encryptionData); int expectedRegionCount = (int) (dataSize - 1) / regionLength; int expectedAdjustedDownloadCount = (expectedRegionCount + 1) * (NONCE_LENGTH + regionLength + TAG_LENGTH); assertEquals(expectedAdjustedDownloadCount, encryptedBlobRange.getAdjustedDownloadCount()); } }
class EncryptedBlobRangeTests extends BlobCryptographyTestBase { @ParameterizedTest @CsvSource(value = {",,bytes=0-", "3,,bytes=0-", "17,,bytes=0-", "34,,bytes=16-", "47,,bytes=16-", "48,,bytes=32-", "2,6,bytes=0-15", "18,2,bytes=0-31", "38,17,bytes=16-63"}) public void testConstructor(Integer offset, Integer count, String expectedString) { EncryptedBlobRange ebr = new EncryptedBlobRange(getBlobRange(offset, count), new EncryptionData().setEncryptionAgent(new EncryptionAgent().setProtocol(ENCRYPTION_PROTOCOL_V1))); assertEquals(expectedString, ebr.toBlobRange().toString()); } @ParameterizedTest @MethodSource("constructorV2Supplier") public void constructorV2(Integer offset, Integer count, String expectedString) { EncryptedBlobRange ebr = new EncryptedBlobRange(getBlobRange(offset, count), new EncryptionData() .setEncryptionAgent(new EncryptionAgent().setProtocol(ENCRYPTION_PROTOCOL_V2)) .setEncryptedRegionInfo(new EncryptedRegionInfo(GCM_ENCRYPTION_REGION_LENGTH, NONCE_LENGTH))); assertEquals(expectedString, ebr.toBlobRange().toString()); } private static Stream<Arguments> constructorV2Supplier() { return Stream.of( Arguments.of(null, null, "bytes=0-"), Arguments.arguments(3, null, "bytes=0-"), Arguments.of(GCM_ENCRYPTION_REGION_LENGTH - 1, null, "bytes=0-"), Arguments.of(GCM_ENCRYPTION_REGION_LENGTH + 1024, null, "bytes=4194332-"), Arguments.of(2 * GCM_ENCRYPTION_REGION_LENGTH, GCM_ENCRYPTION_REGION_LENGTH, "bytes=8388664-12582995"), Arguments.of(2, 6, "bytes=0-4194331"), Arguments.of(5000000, 5000000, "bytes=4194332-12582995"), Arguments.of(5000000, 20165000, "bytes=4194332-25165991") ); } @ParameterizedTest @CsvSource(value = {",", "3,", "17,", "34,", "47,", "48,", "2,6", "18,2", "38,17"}) private static BlobRange getBlobRange(Integer offset, Integer count) { if (offset == null && count == null) { return new BlobRange(0); } else if (count == null) { return new BlobRange(offset); } else { return new BlobRange(offset, (long) count); } } @ParameterizedTest 
@ValueSource(ints = { 16, 4 * Constants.KB, 4 * Constants.MB, Constants.GB }) public void encryptedBlobRangeFromEncryptionData(int regionLength) { long dataSize = 4 * Constants.MB; EncryptionData encryptionData = new EncryptionData() .setEncryptionAgent(new EncryptionAgent(ENCRYPTION_PROTOCOL_V2, EncryptionAlgorithm.AES_GCM_256)) .setEncryptedRegionInfo(new EncryptedRegionInfo(regionLength, NONCE_LENGTH)); EncryptedBlobRange encryptedBlobRange = new EncryptedBlobRange(new BlobRange(0, dataSize), encryptionData); int expectedRegionCount = (int) (dataSize - 1) / regionLength; int expectedAdjustedDownloadCount = (expectedRegionCount + 1) * (NONCE_LENGTH + regionLength + TAG_LENGTH); assertEquals(expectedAdjustedDownloadCount, encryptedBlobRange.getAdjustedDownloadCount()); } @ParameterizedTest @MethodSource("provideRanges") public void testAdjustedBlobRange(int originalOffset, long originalCount, int expectedNewOffset, long expectedNewCount) { int regionLength = 16; EncryptionData encryptionData = new EncryptionData() .setEncryptionAgent(new EncryptionAgent(ENCRYPTION_PROTOCOL_V2, EncryptionAlgorithm.AES_GCM_256)) .setEncryptedRegionInfo(new EncryptedRegionInfo(regionLength, NONCE_LENGTH)); BlobRange originalRange = new BlobRange(originalOffset, originalCount); EncryptedBlobRange encryptedBlobRange = new EncryptedBlobRange(originalRange, encryptionData); BlobRange resultRange = encryptedBlobRange.toBlobRange(); assertEquals(expectedNewOffset, resultRange.getOffset(), "Adjusted offset does not match expected value."); assertEquals(expectedNewCount, resultRange.getCount(), "Adjusted count does not match expected value."); } private static Stream<Arguments> provideRanges() { return Stream.of( Arguments.of(5, 10, 0, 44), Arguments.of(16, 16, 44, 44), Arguments.of(15, 35, 0, 176), Arguments.of(32, 15, 88, 44) ); } }
Right now it seems like we are always setting the initial blob range to `0` and requesting the entire blob. It would be interesting to see if we can add some cases where we: 1. Do not set the original BlobRange offset to `0` and instead to some position in the middle of the blob, and assert that the new blob range when calling `toBlobRange` adjusts its starting position. 2. Set the count of BlobRange to something other than the size of the encryption data and assert that the new blob range from `toBlobRange` returns the expected count. I think to do that it may be worth just creating a new test case with parameterized cases, and the arguments for the parameterized cases would be: originalOffset, originalCount, expectedNewOffset, and expectedNewCount. Then the cases would be something like: 1. The requested range is smaller and entirely in a single authentication region, but does not line up on region boundaries. 2. The requested range lies exactly on a region boundary. 3. The requested range lies on one of the region boundaries. 4. The requested range straddles a region boundary. We could even keep the region length the same and small (e.g. the minimum of 16) to make it easier to read how the logic should behave.
public void encryptedBlobRangeFromEncryptionData(int regionLength) {
    long dataSize = 4 * Constants.MB;

    // V2 encryption data describing GCM regions of the given plaintext length.
    EncryptionData encryptionData = new EncryptionData()
        .setEncryptionAgent(new EncryptionAgent(ENCRYPTION_PROTOCOL_V2, EncryptionAlgorithm.AES_GCM_256))
        .setEncryptedRegionInfo(new EncryptedRegionInfo(regionLength, NONCE_LENGTH));

    EncryptedBlobRange blobRange = new EncryptedBlobRange(new BlobRange(0, dataSize), encryptionData);

    // Every touched region is downloaded in full, and each region carries a
    // nonce and a tag on top of its plaintext-sized payload.
    int lastRegionIndex = (int) (dataSize - 1) / regionLength;
    int expectedDownloadCount = (lastRegionIndex + 1) * (NONCE_LENGTH + regionLength + TAG_LENGTH);

    assertEquals(expectedDownloadCount, blobRange.getAdjustedDownloadCount());
}
assertEquals(expectedAdjustedDownloadCount, encryptedBlobRange.getAdjustedDownloadCount());
public void encryptedBlobRangeFromEncryptionData(int regionLength) {
    long dataSize = 4 * Constants.MB;

    // Build V2 encryption metadata with the parameterized region length.
    EncryptionData data = new EncryptionData()
        .setEncryptionAgent(new EncryptionAgent(ENCRYPTION_PROTOCOL_V2, EncryptionAlgorithm.AES_GCM_256))
        .setEncryptedRegionInfo(new EncryptedRegionInfo(regionLength, NONCE_LENGTH));

    EncryptedBlobRange adjusted = new EncryptedBlobRange(new BlobRange(0, dataSize), data);

    // Number of regions needed to cover dataSize bytes, each expanded by the
    // per-region nonce and authentication tag overhead.
    int regionCount = (int) (dataSize - 1) / regionLength + 1;
    int expectedCount = regionCount * (NONCE_LENGTH + regionLength + TAG_LENGTH);

    assertEquals(expectedCount, adjusted.getAdjustedDownloadCount());
}
class EncryptedBlobRangeTests extends BlobCryptographyTestBase { @ParameterizedTest @CsvSource(value = {",,bytes=0-", "3,,bytes=0-", "17,,bytes=0-", "34,,bytes=16-", "47,,bytes=16-", "48,,bytes=32-", "2,6,bytes=0-15", "18,2,bytes=0-31", "38,17,bytes=16-63"}) public void testConstructor(Integer offset, Integer count, String expectedString) { EncryptedBlobRange ebr = new EncryptedBlobRange(getBlobRange(offset, count), new EncryptionData().setEncryptionAgent(new EncryptionAgent().setProtocol(ENCRYPTION_PROTOCOL_V1))); assertEquals(expectedString, ebr.toBlobRange().toString()); } @ParameterizedTest @MethodSource("constructorV2Supplier") public void constructorV2(Integer offset, Integer count, String expectedString) { EncryptedBlobRange ebr = new EncryptedBlobRange(getBlobRange(offset, count), new EncryptionData() .setEncryptionAgent(new EncryptionAgent().setProtocol(ENCRYPTION_PROTOCOL_V2)) .setEncryptedRegionInfo(new EncryptedRegionInfo(GCM_ENCRYPTION_REGION_LENGTH, NONCE_LENGTH))); assertEquals(expectedString, ebr.toBlobRange().toString()); } private static Stream<Arguments> constructorV2Supplier() { return Stream.of( Arguments.of(null, null, "bytes=0-"), Arguments.arguments(3, null, "bytes=0-"), Arguments.of(GCM_ENCRYPTION_REGION_LENGTH - 1, null, "bytes=0-"), Arguments.of(GCM_ENCRYPTION_REGION_LENGTH + 1024, null, "bytes=4194332-"), Arguments.of(2 * GCM_ENCRYPTION_REGION_LENGTH, GCM_ENCRYPTION_REGION_LENGTH, "bytes=8388664-12582995"), Arguments.of(2, 6, "bytes=0-4194331"), Arguments.of(5000000, 5000000, "bytes=4194332-12582995"), Arguments.of(5000000, 20165000, "bytes=4194332-25165991") ); } @ParameterizedTest @CsvSource(value = {",", "3,", "17,", "34,", "47,", "48,", "2,6", "18,2", "38,17"}) public void testFromBlobRangeHeader(Integer offset, Integer count) { EncryptionData encryptionDataV1 = new EncryptionData() .setEncryptionAgent(new EncryptionAgent().setProtocol(ENCRYPTION_PROTOCOL_V1)); EncryptionData encryptionDataV2 = new EncryptionData() 
.setEncryptionAgent(new EncryptionAgent().setProtocol(ENCRYPTION_PROTOCOL_V2)) .setEncryptedRegionInfo(new EncryptedRegionInfo(GCM_ENCRYPTION_REGION_LENGTH, NONCE_LENGTH)); BlobRange range = getBlobRange(offset, count); EncryptedBlobRange encryptedRangeFromBlobRangeV1 = new EncryptedBlobRange(range, encryptionDataV1); EncryptedBlobRange encryptedRangeFromHeaderV1 = EncryptedBlobRange.getEncryptedBlobRangeFromHeader(range.toHeaderValue(), encryptionDataV1); EncryptedBlobRange encryptedRangeFromBlobRangeV2 = new EncryptedBlobRange(range, encryptionDataV2); EncryptedBlobRange encryptedRangeFromHeaderV2 = EncryptedBlobRange.getEncryptedBlobRangeFromHeader(range.toHeaderValue(), encryptionDataV2); assertEquals(encryptedRangeFromHeaderV1.toBlobRange().toHeaderValue(), encryptedRangeFromBlobRangeV1.toBlobRange().toHeaderValue()); assertEquals(encryptedRangeFromHeaderV2.toBlobRange().toHeaderValue(), encryptedRangeFromBlobRangeV2.toBlobRange().toHeaderValue()); } private static BlobRange getBlobRange(Integer offset, Integer count) { if (offset == null && count == null) { return new BlobRange(0); } else if (count == null) { return new BlobRange(offset); } else { return new BlobRange(offset, (long) count); } } @ParameterizedTest @ValueSource(ints = { 16, 4 * Constants.KB, 4 * Constants.MB, Constants.GB }) }
class EncryptedBlobRangeTests extends BlobCryptographyTestBase { @ParameterizedTest @CsvSource(value = {",,bytes=0-", "3,,bytes=0-", "17,,bytes=0-", "34,,bytes=16-", "47,,bytes=16-", "48,,bytes=32-", "2,6,bytes=0-15", "18,2,bytes=0-31", "38,17,bytes=16-63"}) public void testConstructor(Integer offset, Integer count, String expectedString) { EncryptedBlobRange ebr = new EncryptedBlobRange(getBlobRange(offset, count), new EncryptionData().setEncryptionAgent(new EncryptionAgent().setProtocol(ENCRYPTION_PROTOCOL_V1))); assertEquals(expectedString, ebr.toBlobRange().toString()); } @ParameterizedTest @MethodSource("constructorV2Supplier") public void constructorV2(Integer offset, Integer count, String expectedString) { EncryptedBlobRange ebr = new EncryptedBlobRange(getBlobRange(offset, count), new EncryptionData() .setEncryptionAgent(new EncryptionAgent().setProtocol(ENCRYPTION_PROTOCOL_V2)) .setEncryptedRegionInfo(new EncryptedRegionInfo(GCM_ENCRYPTION_REGION_LENGTH, NONCE_LENGTH))); assertEquals(expectedString, ebr.toBlobRange().toString()); } private static Stream<Arguments> constructorV2Supplier() { return Stream.of( Arguments.of(null, null, "bytes=0-"), Arguments.arguments(3, null, "bytes=0-"), Arguments.of(GCM_ENCRYPTION_REGION_LENGTH - 1, null, "bytes=0-"), Arguments.of(GCM_ENCRYPTION_REGION_LENGTH + 1024, null, "bytes=4194332-"), Arguments.of(2 * GCM_ENCRYPTION_REGION_LENGTH, GCM_ENCRYPTION_REGION_LENGTH, "bytes=8388664-12582995"), Arguments.of(2, 6, "bytes=0-4194331"), Arguments.of(5000000, 5000000, "bytes=4194332-12582995"), Arguments.of(5000000, 20165000, "bytes=4194332-25165991") ); } @ParameterizedTest @CsvSource(value = {",", "3,", "17,", "34,", "47,", "48,", "2,6", "18,2", "38,17"}) public void testFromBlobRangeHeader(Integer offset, Integer count) { EncryptionData encryptionDataV1 = new EncryptionData() .setEncryptionAgent(new EncryptionAgent().setProtocol(ENCRYPTION_PROTOCOL_V1)); EncryptionData encryptionDataV2 = new EncryptionData() 
.setEncryptionAgent(new EncryptionAgent().setProtocol(ENCRYPTION_PROTOCOL_V2)) .setEncryptedRegionInfo(new EncryptedRegionInfo(GCM_ENCRYPTION_REGION_LENGTH, NONCE_LENGTH)); BlobRange range = getBlobRange(offset, count); EncryptedBlobRange encryptedRangeFromBlobRangeV1 = new EncryptedBlobRange(range, encryptionDataV1); EncryptedBlobRange encryptedRangeFromHeaderV1 = EncryptedBlobRange.getEncryptedBlobRangeFromHeader(range.toHeaderValue(), encryptionDataV1); EncryptedBlobRange encryptedRangeFromBlobRangeV2 = new EncryptedBlobRange(range, encryptionDataV2); EncryptedBlobRange encryptedRangeFromHeaderV2 = EncryptedBlobRange.getEncryptedBlobRangeFromHeader(range.toHeaderValue(), encryptionDataV2); assertEquals(encryptedRangeFromHeaderV1.toBlobRange().toHeaderValue(), encryptedRangeFromBlobRangeV1.toBlobRange().toHeaderValue()); assertEquals(encryptedRangeFromHeaderV2.toBlobRange().toHeaderValue(), encryptedRangeFromBlobRangeV2.toBlobRange().toHeaderValue()); } private static BlobRange getBlobRange(Integer offset, Integer count) { if (offset == null && count == null) { return new BlobRange(0); } else if (count == null) { return new BlobRange(offset); } else { return new BlobRange(offset, (long) count); } } @ParameterizedTest @ValueSource(ints = { 16, 4 * Constants.KB, 4 * Constants.MB, Constants.GB }) @ParameterizedTest @MethodSource("provideRanges") public void testAdjustedBlobRange(int originalOffset, long originalCount, int expectedNewOffset, long expectedNewCount) { int regionLength = 16; EncryptionData encryptionData = new EncryptionData() .setEncryptionAgent(new EncryptionAgent(ENCRYPTION_PROTOCOL_V2, EncryptionAlgorithm.AES_GCM_256)) .setEncryptedRegionInfo(new EncryptedRegionInfo(regionLength, NONCE_LENGTH)); BlobRange originalRange = new BlobRange(originalOffset, originalCount); EncryptedBlobRange encryptedBlobRange = new EncryptedBlobRange(originalRange, encryptionData); BlobRange resultRange = encryptedBlobRange.toBlobRange(); assertEquals(expectedNewOffset, 
resultRange.getOffset(), "Adjusted offset does not match expected value."); assertEquals(expectedNewCount, resultRange.getCount(), "Adjusted count does not match expected value."); } private static Stream<Arguments> provideRanges() { return Stream.of( Arguments.of(5, 10, 0, 44), Arguments.of(16, 16, 44, 44), Arguments.of(15, 35, 0, 176), Arguments.of(32, 15, 88, 44) ); } }
I'd recommend extracting this into a separate method.
public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
    // The Key Vault secrets SDK must be on the classpath for this feature.
    if (!isKeyVaultClientOnClasspath()) {
        logger.debug(String.format(SKIP_CONFIGURE_REASON_FORMAT, "com.azure:azure-security-keyvault-secrets doesn't exist in classpath"));
        return;
    }
    final AzureKeyVaultSecretProperties secretProperties = loadProperties(environment);
    if (!secretProperties.isPropertySourceEnabled()) {
        logger.debug(String.format(SKIP_CONFIGURE_REASON_FORMAT, "spring.cloud.azure.keyvault.secret.property-source-enabled=false"));
        return;
    }
    if (secretProperties.getPropertySources().isEmpty()) {
        logger.debug(String.format(SKIP_CONFIGURE_REASON_FORMAT, "spring.cloud.azure.keyvault.secret.property-sources is empty"));
        return;
    }
    final List<AzureKeyVaultPropertySourceProperties> propertiesList = secretProperties.getPropertySources();
    // Fail fast on misconfiguration before building any clients.
    checkDuplicatePropertySourceNames(propertiesList);
    List<KeyVaultPropertySource> keyVaultPropertySources = buildKeyVaultPropertySourceList(propertiesList);
    final MutablePropertySources propertySources = environment.getPropertySources();
    // Insert in reverse so the first-configured source is added last and ends
    // up closest to the anchor position.
    for (int i = keyVaultPropertySources.size() - 1; i >= 0; i--) {
        KeyVaultPropertySource propertySource = keyVaultPropertySources.get(i);
        logger.debug("Inserting Key Vault PropertySource. name = " + propertySource.getName());
        if (propertySources.contains(SYSTEM_ENVIRONMENT_PROPERTY_SOURCE_NAME)) {
            propertySources.addAfter(SYSTEM_ENVIRONMENT_PROPERTY_SOURCE_NAME, propertySource);
        } else {
            propertySources.addFirst(propertySource);
        }
    }
}

/**
 * Rejects configuration in which two property sources share the same name.
 *
 * @param propertiesList the configured property source properties.
 * @throws IllegalStateException when a duplicate source name is found.
 */
private void checkDuplicatePropertySourceNames(List<AzureKeyVaultPropertySourceProperties> propertiesList) {
    List<String> sourceNames = propertiesList.stream()
        .map(AzureKeyVaultPropertySourceProperties::getName)
        .toList();
    Set<String> deduplicatedSourceNames = new HashSet<>(sourceNames);
    if (propertiesList.size() != deduplicatedSourceNames.size()) {
        throw new IllegalStateException("Duplicate property source name found: " + sourceNames);
    }
}
}
public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
    // Nothing to do unless the Key Vault secrets SDK is on the classpath.
    if (!isKeyVaultClientOnClasspath()) {
        logger.debug(String.format(SKIP_CONFIGURE_REASON_FORMAT, "com.azure:azure-security-keyvault-secrets doesn't exist in classpath"));
        return;
    }
    final AzureKeyVaultSecretProperties secretProperties = loadProperties(environment);
    // Feature can be turned off explicitly via configuration.
    if (!secretProperties.isPropertySourceEnabled()) {
        logger.debug(String.format(SKIP_CONFIGURE_REASON_FORMAT, "spring.cloud.azure.keyvault.secret.property-source-enabled=false"));
        return;
    }
    // No configured sources means there is nothing to insert.
    if (secretProperties.getPropertySources().isEmpty()) {
        logger.debug(String.format(SKIP_CONFIGURE_REASON_FORMAT, "spring.cloud.azure.keyvault.secret.property-sources is empty"));
        return;
    }
    final List<AzureKeyVaultPropertySourceProperties> propertiesList = secretProperties.getPropertySources();
    // Validate configuration (throws on duplicate names) before building clients.
    checkDuplicatePropertySourceNames(propertiesList);
    List<KeyVaultPropertySource> keyVaultPropertySources = buildKeyVaultPropertySourceList(propertiesList);
    final MutablePropertySources propertySources = environment.getPropertySources();
    // Insert in reverse so the first-configured source is added last and ends
    // up closest to the anchor position.
    for (int i = keyVaultPropertySources.size() - 1; i >= 0; i--) {
        KeyVaultPropertySource propertySource = keyVaultPropertySources.get(i);
        logger.debug("Inserting Key Vault PropertySource. name = " + propertySource.getName());
        // Prefer a position just after the system environment source when it
        // exists; otherwise take highest precedence.
        if (propertySources.contains(SYSTEM_ENVIRONMENT_PROPERTY_SOURCE_NAME)) {
            propertySources.addAfter(SYSTEM_ENVIRONMENT_PROPERTY_SOURCE_NAME, propertySource);
        } else {
            propertySources.addFirst(propertySource);
        }
    }
}
class KeyVaultEnvironmentPostProcessor implements EnvironmentPostProcessor, Ordered { /** * The order value of the {@link KeyVaultEnvironmentPostProcessor}. */ public static final int ORDER = ConfigDataEnvironmentPostProcessor.ORDER + 1; private static final String SKIP_CONFIGURE_REASON_FORMAT = "Skip configuring Key Vault PropertySource because %s."; private final Log logger; private final ConfigurableBootstrapContext bootstrapContext; /** * Creates a new instance of {@link KeyVaultEnvironmentPostProcessor}. * @param loggerFactory The logger factory to get the logger. * @param bootstrapContext The bootstrap context. */ public KeyVaultEnvironmentPostProcessor(DeferredLogFactory loggerFactory, ConfigurableBootstrapContext bootstrapContext) { this.logger = loggerFactory.getLog(getClass()); this.bootstrapContext = bootstrapContext; } /** * Construct {@link KeyVaultPropertySource}s according to {@link AzureKeyVaultSecretProperties}, * then insert these {@link KeyVaultPropertySource}s into {@link ConfigurableEnvironment}. * * @param environment the environment. * @param application the application. 
*/ @Override private List<KeyVaultPropertySource> buildKeyVaultPropertySourceList( List<AzureKeyVaultPropertySourceProperties> propertiesList) { List<KeyVaultPropertySource> propertySources = new ArrayList<>(); for (int i = 0; i < propertiesList.size(); i++) { AzureKeyVaultPropertySourceProperties properties = propertiesList.get(i); if (!properties.isEnabled()) { logger.debug(String.format(SKIP_CONFIGURE_REASON_FORMAT, "spring.cloud.azure.keyvault.secret.property-sources[" + i + "].enabled = false")); continue; } if (!StringUtils.hasText(properties.getEndpoint())) { logger.debug(String.format(SKIP_CONFIGURE_REASON_FORMAT, "spring.cloud.azure.keyvault.secret.property-sources[" + i + "].endpoint is empty")); continue; } propertySources.add(buildKeyVaultPropertySource(properties)); } return propertySources; } private KeyVaultPropertySource buildKeyVaultPropertySource( AzureKeyVaultPropertySourceProperties properties) { try { final KeyVaultOperation keyVaultOperation = new KeyVaultOperation( buildSecretClient(properties), properties.getSecretKeys(), properties.isCaseSensitive()); return new KeyVaultPropertySource( properties.getName(), properties.getRefreshInterval(), keyVaultOperation, properties.isCaseSensitive()); } catch (final Exception exception) { throw new IllegalStateException("Failed to configure KeyVault property source '" + properties.getName() + "'", exception); } } private SecretClient buildSecretClient(AzureKeyVaultPropertySourceProperties propertySourceProperties) { AzureKeyVaultSecretProperties secretProperties = toAzureKeyVaultSecretProperties(propertySourceProperties); return buildSecretClient(secretProperties); } private AzureKeyVaultSecretProperties toAzureKeyVaultSecretProperties( AzureKeyVaultPropertySourceProperties propertySourceProperties) { AzureKeyVaultSecretProperties secretProperties = new AzureKeyVaultSecretProperties(); AzurePropertiesUtils.copyAzureCommonProperties(propertySourceProperties, secretProperties); 
secretProperties.setEndpoint(propertySourceProperties.getEndpoint()); secretProperties.setServiceVersion(propertySourceProperties.getServiceVersion()); secretProperties.setChallengeResourceVerificationEnabled(propertySourceProperties.isChallengeResourceVerificationEnabled()); return secretProperties; } /** * Build a KeyVault Secret client * @param secretProperties secret properties * @return secret client */ SecretClient buildSecretClient(AzureKeyVaultSecretProperties secretProperties) { SecretClientBuilderFactory factory = new SecretClientBuilderFactory(secretProperties); factory.setSpringIdentifier(AzureSpringIdentifier.AZURE_SPRING_KEY_VAULT_SECRETS); if (bootstrapContext != null && bootstrapContext.isRegistered(TokenCredential.class)) { TokenCredential registerCredential = bootstrapContext.get(TokenCredential.class); logger.debug(registerCredential.getClass().getSimpleName() + " is registered in bootstrap context, use it to build SecretClient."); factory.setTokenCredentialResolver( new AzureTokenCredentialResolver(ignored -> registerCredential) ); } return factory.build().buildClient(); } AzureKeyVaultSecretProperties loadProperties(ConfigurableEnvironment environment) { Binder binder = Binder.get(environment); AzureGlobalProperties globalProperties = binder .bind(AzureGlobalProperties.PREFIX, Bindable.of(AzureGlobalProperties.class)) .orElseGet(AzureGlobalProperties::new); AzureKeyVaultSecretProperties secretProperties = binder .bind(AzureKeyVaultSecretProperties.PREFIX, Bindable.of(AzureKeyVaultSecretProperties.class)) .orElseGet(AzureKeyVaultSecretProperties::new); List<AzureKeyVaultPropertySourceProperties> list = secretProperties.getPropertySources(); for (int i = 0; i < list.size(); i++) { list.set(i, buildMergedProperties(globalProperties, list.get(i))); } for (int i = 0; i < list.size(); i++) { AzureKeyVaultPropertySourceProperties propertySourceProperties = list.get(i); if (!StringUtils.hasText(propertySourceProperties.getName())) { 
propertySourceProperties.setName(buildPropertySourceName(i)); } } return secretProperties; } private AzureKeyVaultPropertySourceProperties buildMergedProperties( AzureGlobalProperties globalProperties, AzureKeyVaultPropertySourceProperties propertySourceProperties) { AzureKeyVaultPropertySourceProperties mergedProperties = new AzureKeyVaultPropertySourceProperties(); AzurePropertiesUtils.mergeAzureCommonProperties(globalProperties, propertySourceProperties, mergedProperties); mergedProperties.setEnabled(propertySourceProperties.isEnabled()); mergedProperties.setName(propertySourceProperties.getName()); mergedProperties.setEndpoint(propertySourceProperties.getEndpoint()); mergedProperties.setServiceVersion(propertySourceProperties.getServiceVersion()); mergedProperties.setCaseSensitive(propertySourceProperties.isCaseSensitive()); mergedProperties.setSecretKeys(propertySourceProperties.getSecretKeys()); mergedProperties.setRefreshInterval(propertySourceProperties.getRefreshInterval()); mergedProperties.setChallengeResourceVerificationEnabled(propertySourceProperties.isChallengeResourceVerificationEnabled()); return mergedProperties; } String buildPropertySourceName(int index) { return "azure-key-vault-secret-property-source-" + index; } private boolean isKeyVaultClientOnClasspath() { return ClassUtils.isPresent("com.azure.security.keyvault.secrets.SecretClient", KeyVaultEnvironmentPostProcessor.class.getClassLoader()); } /** * Get the order value of this object. * @return The order value. */ @Override public int getOrder() { return ORDER; } }
class KeyVaultEnvironmentPostProcessor implements EnvironmentPostProcessor, Ordered { /** * The order value of the {@link KeyVaultEnvironmentPostProcessor}. */ public static final int ORDER = ConfigDataEnvironmentPostProcessor.ORDER + 1; private static final String SKIP_CONFIGURE_REASON_FORMAT = "Skip configuring Key Vault PropertySource because %s."; private final Log logger; private final ConfigurableBootstrapContext bootstrapContext; /** * Creates a new instance of {@link KeyVaultEnvironmentPostProcessor}. * @param loggerFactory The logger factory to get the logger. * @param bootstrapContext The bootstrap context. */ public KeyVaultEnvironmentPostProcessor(DeferredLogFactory loggerFactory, ConfigurableBootstrapContext bootstrapContext) { this.logger = loggerFactory.getLog(getClass()); this.bootstrapContext = bootstrapContext; } /** * Construct {@link KeyVaultPropertySource}s according to {@link AzureKeyVaultSecretProperties}, * then insert these {@link KeyVaultPropertySource}s into {@link ConfigurableEnvironment}. * * @param environment the environment. * @param application the application. 
*/ @Override private void checkDuplicatePropertySourceNames(List<AzureKeyVaultPropertySourceProperties> propertiesList) { List<String> sourceNames = propertiesList.stream() .map(AzureKeyVaultPropertySourceProperties::getName) .toList(); Set<String> deduplicatedSourceNames = new HashSet<>(sourceNames); if (propertiesList.size() != deduplicatedSourceNames.size()) { throw new IllegalStateException("Duplicate property source name found: " + sourceNames); } } private List<KeyVaultPropertySource> buildKeyVaultPropertySourceList( List<AzureKeyVaultPropertySourceProperties> propertiesList) { List<KeyVaultPropertySource> propertySources = new ArrayList<>(); for (int i = 0; i < propertiesList.size(); i++) { AzureKeyVaultPropertySourceProperties properties = propertiesList.get(i); if (!properties.isEnabled()) { logger.debug(String.format(SKIP_CONFIGURE_REASON_FORMAT, "spring.cloud.azure.keyvault.secret.property-sources[" + i + "].enabled = false")); continue; } if (!StringUtils.hasText(properties.getEndpoint())) { logger.debug(String.format(SKIP_CONFIGURE_REASON_FORMAT, "spring.cloud.azure.keyvault.secret.property-sources[" + i + "].endpoint is empty")); continue; } propertySources.add(buildKeyVaultPropertySource(properties)); } return propertySources; } private KeyVaultPropertySource buildKeyVaultPropertySource( AzureKeyVaultPropertySourceProperties properties) { try { final KeyVaultOperation keyVaultOperation = new KeyVaultOperation(buildSecretClient(properties)); return new KeyVaultPropertySource( properties.getName(), properties.getRefreshInterval(), keyVaultOperation, properties.getSecretKeys(), properties.isCaseSensitive()); } catch (final Exception exception) { throw new IllegalStateException("Failed to configure KeyVault property source '" + properties.getName() + "'", exception); } } private SecretClient buildSecretClient(AzureKeyVaultPropertySourceProperties propertySourceProperties) { AzureKeyVaultSecretProperties secretProperties = 
toAzureKeyVaultSecretProperties(propertySourceProperties); return buildSecretClient(secretProperties); } private AzureKeyVaultSecretProperties toAzureKeyVaultSecretProperties( AzureKeyVaultPropertySourceProperties propertySourceProperties) { AzureKeyVaultSecretProperties secretProperties = new AzureKeyVaultSecretProperties(); AzurePropertiesUtils.copyAzureCommonProperties(propertySourceProperties, secretProperties); secretProperties.setEndpoint(propertySourceProperties.getEndpoint()); secretProperties.setServiceVersion(propertySourceProperties.getServiceVersion()); secretProperties.setChallengeResourceVerificationEnabled(propertySourceProperties.isChallengeResourceVerificationEnabled()); return secretProperties; } /** * Build a KeyVault Secret client * @param secretProperties secret properties * @return secret client */ SecretClient buildSecretClient(AzureKeyVaultSecretProperties secretProperties) { SecretClientBuilderFactory factory = new SecretClientBuilderFactory(secretProperties); factory.setSpringIdentifier(AzureSpringIdentifier.AZURE_SPRING_KEY_VAULT_SECRETS); if (bootstrapContext != null && bootstrapContext.isRegistered(TokenCredential.class)) { TokenCredential registerCredential = bootstrapContext.get(TokenCredential.class); logger.debug(registerCredential.getClass().getSimpleName() + " is registered in bootstrap context, use it to build SecretClient."); factory.setTokenCredentialResolver( new AzureTokenCredentialResolver(ignored -> registerCredential) ); } return factory.build().buildClient(); } AzureKeyVaultSecretProperties loadProperties(ConfigurableEnvironment environment) { Binder binder = Binder.get(environment); AzureGlobalProperties globalProperties = binder .bind(AzureGlobalProperties.PREFIX, Bindable.of(AzureGlobalProperties.class)) .orElseGet(AzureGlobalProperties::new); AzureKeyVaultSecretProperties secretProperties = binder .bind(AzureKeyVaultSecretProperties.PREFIX, Bindable.of(AzureKeyVaultSecretProperties.class)) 
.orElseGet(AzureKeyVaultSecretProperties::new); List<AzureKeyVaultPropertySourceProperties> list = secretProperties.getPropertySources(); for (int i = 0; i < list.size(); i++) { list.set(i, buildMergedProperties(globalProperties, list.get(i))); } for (int i = 0; i < list.size(); i++) { AzureKeyVaultPropertySourceProperties propertySourceProperties = list.get(i); if (!StringUtils.hasText(propertySourceProperties.getName())) { propertySourceProperties.setName(buildPropertySourceName(i)); } } return secretProperties; } private AzureKeyVaultPropertySourceProperties buildMergedProperties( AzureGlobalProperties globalProperties, AzureKeyVaultPropertySourceProperties propertySourceProperties) { AzureKeyVaultPropertySourceProperties mergedProperties = new AzureKeyVaultPropertySourceProperties(); AzurePropertiesUtils.mergeAzureCommonProperties(globalProperties, propertySourceProperties, mergedProperties); mergedProperties.setEnabled(propertySourceProperties.isEnabled()); mergedProperties.setName(propertySourceProperties.getName()); mergedProperties.setEndpoint(propertySourceProperties.getEndpoint()); mergedProperties.setServiceVersion(propertySourceProperties.getServiceVersion()); mergedProperties.setCaseSensitive(propertySourceProperties.isCaseSensitive()); mergedProperties.setSecretKeys(propertySourceProperties.getSecretKeys()); mergedProperties.setRefreshInterval(propertySourceProperties.getRefreshInterval()); mergedProperties.setChallengeResourceVerificationEnabled(propertySourceProperties.isChallengeResourceVerificationEnabled()); return mergedProperties; } String buildPropertySourceName(int index) { return "azure-key-vault-secret-property-source-" + index; } private boolean isKeyVaultClientOnClasspath() { return ClassUtils.isPresent("com.azure.security.keyvault.secrets.SecretClient", KeyVaultEnvironmentPostProcessor.class.getClassLoader()); } /** * Get the order value of this object. * @return The order value. */ @Override public int getOrder() { return ORDER; } }
Changelog? Even though I haven't seen anyone use delivery annotations in EH ... Can we copy this logic over there? (Iirc we allow them to access the raw AMQP message so it could be possible)
public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int deliveryAnnotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (deliveryAnnotations != null) { final Map<Symbol, Object> map = deliveryAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); deliveryAnnotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + deliveryAnnotationsSize + applicationPropertiesSize + payloadSize; }
final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations();
public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int deliveryAnnotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (deliveryAnnotations != null) { final Map<Symbol, Object> map = deliveryAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); deliveryAnnotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + deliveryAnnotationsSize + applicationPropertiesSize + payloadSize; }
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to deserialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { Map<String, Object> describedTypeMap = convertToDescribedType(brokeredMessage.getApplicationProperties()); amqpMessage.setApplicationProperties(new ApplicationProperties(describedTypeMap)); } if 
(brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } amqpMessage.getProperties().setTo(brokeredMessage.getTo()); amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { 
amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), brokeredMessage.getPartitionKey()); } amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } /** * Convert specific type to described type for sending on the wire. * @param propertiesValue application properties set by user which may contain specific type. * @return Map only contains primitive type and described type. */ private static Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { entry.setValue(new UriDescribedType((URI) value)); } else if (value instanceof OffsetDateTime) { entry.setValue(new OffsetDateTimeDescribedType((OffsetDateTime) value)); } else if (value instanceof Duration) { entry.setValue(new DurationDescribedType((Duration) value)); } } return propertiesValue; } /** * Convert described type to origin type. 
* @param propertiesValue application properties from amqp message may contain described type. * @return Map without described type. */ private static Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { entry.setValue(MessageUtils.describedToOrigin((DescribedType) value)); } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw LOGGER.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw LOGGER.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof 
long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return Collections.emptyList(); } private List<OffsetDateTime> deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { LOGGER.atWarning() .addKeyValue("statusCode", statusCode) .log("AMQP response did not contain OK status code."); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { LOGGER.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { LOGGER.atWarning() .addKeyValue("expectedType", Map.class) .addKeyValue("actualType", responseBodyMap.getClass()) .log("AMQP response body is not correct instance."); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { LOGGER.atWarning().addKeyValue("expectedKey", ManagementConstants.MESSAGES) .log("AMQP response body did not contain key."); return Collections.emptyList(); } else if (!(messages instanceof 
Iterable)) { LOGGER.atWarning() .addKeyValue("expectedType", Iterable.class) .addKeyValue("actualType", messages.getClass()) .log("Response body contents is not the correct type."); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { LOGGER.atWarning() .addKeyValue("expectedType", Map.class) .addKeyValue("actualType", message.getClass()) .log("Message inside iterable of message is not correct type."); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { LOGGER.atWarning() .addKeyValue("actualType", body.getType()) .log("Message body is not correct. Not setting body contents."); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { LOGGER.atWarning() .addKeyValue("actualType", "null") .log("Message body is not correct. 
Not setting body contents."); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = convertToOriginType(applicationProperties.getValue()); brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(propertiesValue); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = 
amqpMessage.getProperties(); if (amqpProperties != null) { final String to = amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithDeliveryTag) { brokeredMessage.setLockToken(((MessageWithDeliveryTag) amqpMessage).getDeliveryTag()); } else if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = 
(Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } @SuppressWarnings("rawtypes") private static int sizeof(Object obj) { if (obj == null) { return 0; } if (obj instanceof String) { return obj.toString().length() << 1; } if (obj instanceof Symbol) { return ((Symbol) obj).length() << 1; } if (obj instanceof Byte || obj instanceof UnsignedByte) { return Byte.BYTES; } if (obj instanceof Integer || obj instanceof UnsignedInteger) { return Integer.BYTES; } if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) { return Long.BYTES; } if (obj instanceof Short || obj instanceof UnsignedShort) { return Short.BYTES; } if (obj instanceof Boolean) { return 1; } if (obj instanceof Character) { return 4; } if (obj instanceof Float) { return Float.BYTES; } if (obj instanceof Double) { return Double.BYTES; } if (obj instanceof UUID) { return 16; } if (obj instanceof Decimal32) { return 4; } if (obj instanceof Decimal64) { return 8; } if (obj instanceof Decimal128) { return 16; } if (obj instanceof Binary) { return ((Binary) obj).getLength(); } if (obj instanceof Declare) { return 7; } if (obj instanceof Discharge) { Discharge discharge = (Discharge) obj; return 12 + discharge.getTxnId().getLength(); } if (obj instanceof ServiceBusDescribedType) { ServiceBusDescribedType describedType = (ServiceBusDescribedType) obj; return describedType.size(); } if (obj instanceof Map) { int size = 8; Map map = (Map) obj; for (Object value : map.keySet()) { size += sizeof(value); } for (Object value : map.values()) { size += sizeof(value); } return size; } if (obj instanceof Iterable) { int size = 8; for (Object innerObject : (Iterable) obj) { size += 
sizeof(innerObject); } return size; } if (obj.getClass().isArray()) { int size = 8; int length = Array.getLength(obj); for (int i = 0; i < length; i++) { size += sizeof(Array.get(obj, i)); } return size; } throw new IllegalArgumentException(String.format(Locale.US, "Encoding Type: %s is not supported", obj.getClass())); } }
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to deserialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { Map<String, Object> describedTypeMap = convertToDescribedType(brokeredMessage.getApplicationProperties()); amqpMessage.setApplicationProperties(new ApplicationProperties(describedTypeMap)); } if 
(brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } amqpMessage.getProperties().setTo(brokeredMessage.getTo()); amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { 
amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), brokeredMessage.getPartitionKey()); } amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } /** * Convert specific type to described type for sending on the wire. * @param propertiesValue application properties set by user which may contain specific type. * @return Map only contains primitive type and described type. */ private static Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { entry.setValue(new UriDescribedType((URI) value)); } else if (value instanceof OffsetDateTime) { entry.setValue(new OffsetDateTimeDescribedType((OffsetDateTime) value)); } else if (value instanceof Duration) { entry.setValue(new DurationDescribedType((Duration) value)); } } return propertiesValue; } /** * Convert described type to origin type. 
* @param propertiesValue application properties from amqp message may contain described type. * @return Map without described type. */ private static Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { entry.setValue(MessageUtils.describedToOrigin((DescribedType) value)); } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw LOGGER.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw LOGGER.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof 
long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return Collections.emptyList(); } private List<OffsetDateTime> deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { LOGGER.atWarning() .addKeyValue("statusCode", statusCode) .log("AMQP response did not contain OK status code."); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { LOGGER.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { LOGGER.atWarning() .addKeyValue("expectedType", Map.class) .addKeyValue("actualType", responseBodyMap.getClass()) .log("AMQP response body is not correct instance."); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { LOGGER.atWarning().addKeyValue("expectedKey", ManagementConstants.MESSAGES) .log("AMQP response body did not contain key."); return Collections.emptyList(); } else if (!(messages instanceof 
Iterable)) { LOGGER.atWarning() .addKeyValue("expectedType", Iterable.class) .addKeyValue("actualType", messages.getClass()) .log("Response body contents is not the correct type."); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { LOGGER.atWarning() .addKeyValue("expectedType", Map.class) .addKeyValue("actualType", message.getClass()) .log("Message inside iterable of message is not correct type."); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { LOGGER.atWarning() .addKeyValue("actualType", body.getType()) .log("Message body is not correct. Not setting body contents."); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { LOGGER.atWarning() .addKeyValue("actualType", "null") .log("Message body is not correct. 
Not setting body contents."); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = convertToOriginType(applicationProperties.getValue()); brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(propertiesValue); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = 
amqpMessage.getProperties(); if (amqpProperties != null) { final String to = amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithDeliveryTag) { brokeredMessage.setLockToken(((MessageWithDeliveryTag) amqpMessage).getDeliveryTag()); } else if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = 
(Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } @SuppressWarnings("rawtypes") private static int sizeof(Object obj) { if (obj == null) { return 0; } if (obj instanceof String) { return obj.toString().length() << 1; } if (obj instanceof Symbol) { return ((Symbol) obj).length() << 1; } if (obj instanceof Byte || obj instanceof UnsignedByte) { return Byte.BYTES; } if (obj instanceof Integer || obj instanceof UnsignedInteger) { return Integer.BYTES; } if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) { return Long.BYTES; } if (obj instanceof Short || obj instanceof UnsignedShort) { return Short.BYTES; } if (obj instanceof Boolean) { return 1; } if (obj instanceof Character) { return 4; } if (obj instanceof Float) { return Float.BYTES; } if (obj instanceof Double) { return Double.BYTES; } if (obj instanceof UUID) { return 16; } if (obj instanceof Decimal32) { return 4; } if (obj instanceof Decimal64) { return 8; } if (obj instanceof Decimal128) { return 16; } if (obj instanceof Binary) { return ((Binary) obj).getLength(); } if (obj instanceof Declare) { return 7; } if (obj instanceof Discharge) { Discharge discharge = (Discharge) obj; return 12 + discharge.getTxnId().getLength(); } if (obj instanceof ServiceBusDescribedType) { ServiceBusDescribedType describedType = (ServiceBusDescribedType) obj; return describedType.size(); } if (obj instanceof Map) { int size = 8; Map map = (Map) obj; for (Object value : map.keySet()) { size += sizeof(value); } for (Object value : map.values()) { size += sizeof(value); } return size; } if (obj instanceof Iterable) { int size = 8; for (Object innerObject : (Iterable) obj) { size += 
sizeof(innerObject); } return size; } if (obj.getClass().isArray()) { int size = 8; int length = Array.getLength(obj); for (int i = 0; i < length; i++) { size += sizeof(Array.get(obj, i)); } return size; } throw new IllegalArgumentException(String.format(Locale.US, "Encoding Type: %s is not supported", obj.getClass())); } }
> Even though I haven't seen anyone use delivery annotations in Event Hubs ... can we copy this logic over there? (IIRC we allow users to access the raw AMQP message, so it could be possible.) Good point — I will update EventHubSerializer::getSize as well.
public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int deliveryAnnotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (deliveryAnnotations != null) { final Map<Symbol, Object> map = deliveryAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); deliveryAnnotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + deliveryAnnotationsSize + applicationPropertiesSize + payloadSize; }
final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations();
public int getSize(Message amqpMessage) { if (amqpMessage == null) { return 0; } int payloadSize = getPayloadSize(amqpMessage); final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); int annotationsSize = 0; int deliveryAnnotationsSize = 0; int applicationPropertiesSize = 0; if (messageAnnotations != null) { final Map<Symbol, Object> map = messageAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); annotationsSize += size; } } if (deliveryAnnotations != null) { final Map<Symbol, Object> map = deliveryAnnotations.getValue(); for (Map.Entry<Symbol, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); deliveryAnnotationsSize += size; } } if (applicationProperties != null) { final Map<String, Object> map = applicationProperties.getValue(); for (Map.Entry<String, Object> entry : map.entrySet()) { final int size = sizeof(entry.getKey()) + sizeof(entry.getValue()); applicationPropertiesSize += size; } } return annotationsSize + deliveryAnnotationsSize + applicationPropertiesSize + payloadSize; }
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to deserialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { Map<String, Object> describedTypeMap = convertToDescribedType(brokeredMessage.getApplicationProperties()); amqpMessage.setApplicationProperties(new ApplicationProperties(describedTypeMap)); } if 
(brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } amqpMessage.getProperties().setTo(brokeredMessage.getTo()); amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { 
amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), brokeredMessage.getPartitionKey()); } amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } /** * Convert specific type to described type for sending on the wire. * @param propertiesValue application properties set by user which may contain specific type. * @return Map only contains primitive type and described type. */ private static Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { entry.setValue(new UriDescribedType((URI) value)); } else if (value instanceof OffsetDateTime) { entry.setValue(new OffsetDateTimeDescribedType((OffsetDateTime) value)); } else if (value instanceof Duration) { entry.setValue(new DurationDescribedType((Duration) value)); } } return propertiesValue; } /** * Convert described type to origin type. 
* @param propertiesValue application properties from amqp message may contain described type. * @return Map without described type. */ private static Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { entry.setValue(MessageUtils.describedToOrigin((DescribedType) value)); } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw LOGGER.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw LOGGER.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof 
long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return Collections.emptyList(); } private List<OffsetDateTime> deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { LOGGER.atWarning() .addKeyValue("statusCode", statusCode) .log("AMQP response did not contain OK status code."); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { LOGGER.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { LOGGER.atWarning() .addKeyValue("expectedType", Map.class) .addKeyValue("actualType", responseBodyMap.getClass()) .log("AMQP response body is not correct instance."); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { LOGGER.atWarning().addKeyValue("expectedKey", ManagementConstants.MESSAGES) .log("AMQP response body did not contain key."); return Collections.emptyList(); } else if (!(messages instanceof 
Iterable)) { LOGGER.atWarning() .addKeyValue("expectedType", Iterable.class) .addKeyValue("actualType", messages.getClass()) .log("Response body contents is not the correct type."); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { LOGGER.atWarning() .addKeyValue("expectedType", Map.class) .addKeyValue("actualType", message.getClass()) .log("Message inside iterable of message is not correct type."); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { LOGGER.atWarning() .addKeyValue("actualType", body.getType()) .log("Message body is not correct. Not setting body contents."); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { LOGGER.atWarning() .addKeyValue("actualType", "null") .log("Message body is not correct. 
Not setting body contents."); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = convertToOriginType(applicationProperties.getValue()); brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(propertiesValue); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = 
amqpMessage.getProperties(); if (amqpProperties != null) { final String to = amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithDeliveryTag) { brokeredMessage.setLockToken(((MessageWithDeliveryTag) amqpMessage).getDeliveryTag()); } else if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = 
(Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } @SuppressWarnings("rawtypes") private static int sizeof(Object obj) { if (obj == null) { return 0; } if (obj instanceof String) { return obj.toString().length() << 1; } if (obj instanceof Symbol) { return ((Symbol) obj).length() << 1; } if (obj instanceof Byte || obj instanceof UnsignedByte) { return Byte.BYTES; } if (obj instanceof Integer || obj instanceof UnsignedInteger) { return Integer.BYTES; } if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) { return Long.BYTES; } if (obj instanceof Short || obj instanceof UnsignedShort) { return Short.BYTES; } if (obj instanceof Boolean) { return 1; } if (obj instanceof Character) { return 4; } if (obj instanceof Float) { return Float.BYTES; } if (obj instanceof Double) { return Double.BYTES; } if (obj instanceof UUID) { return 16; } if (obj instanceof Decimal32) { return 4; } if (obj instanceof Decimal64) { return 8; } if (obj instanceof Decimal128) { return 16; } if (obj instanceof Binary) { return ((Binary) obj).getLength(); } if (obj instanceof Declare) { return 7; } if (obj instanceof Discharge) { Discharge discharge = (Discharge) obj; return 12 + discharge.getTxnId().getLength(); } if (obj instanceof ServiceBusDescribedType) { ServiceBusDescribedType describedType = (ServiceBusDescribedType) obj; return describedType.size(); } if (obj instanceof Map) { int size = 8; Map map = (Map) obj; for (Object value : map.keySet()) { size += sizeof(value); } for (Object value : map.values()) { size += sizeof(value); } return size; } if (obj instanceof Iterable) { int size = 8; for (Object innerObject : (Iterable) obj) { size += 
sizeof(innerObject); } return size; } if (obj.getClass().isArray()) { int size = 8; int length = Array.getLength(obj); for (int i = 0; i < length; i++) { size += sizeof(Array.get(obj, i)); } return size; } throw new IllegalArgumentException(String.format(Locale.US, "Encoding Type: %s is not supported", obj.getClass())); } }
class ServiceBusMessageSerializer implements MessageSerializer { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusMessageSerializer.class); /** * Gets the serialized size of the AMQP message. */ @Override /** * Creates the AMQP message represented by this {@code object}. Currently, only supports serializing {@link * ServiceBusMessage}. * * @param object Concrete object to deserialize. * * @return A new AMQP message for this {@code object}. * * @throws IllegalArgumentException if {@code object} is not an instance of {@link ServiceBusMessage}. */ @Override public <T> Message serialize(T object) { Objects.requireNonNull(object, "'object' to serialize cannot be null."); if (!(object instanceof ServiceBusMessage)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "Cannot serialize object that is not ServiceBusMessage. Clazz: " + object.getClass())); } final ServiceBusMessage brokeredMessage = (ServiceBusMessage) object; AmqpMessageBodyType brokeredBodyType = brokeredMessage.getRawAmqpMessage().getBody().getBodyType(); final Message amqpMessage = Proton.message(); byte[] body; if (brokeredBodyType == AmqpMessageBodyType.DATA || brokeredBodyType == null) { body = brokeredMessage.getBody().toBytes(); amqpMessage.setBody(new Data(new Binary(body))); } else if (brokeredBodyType == AmqpMessageBodyType.SEQUENCE) { List<Object> sequenceList = brokeredMessage.getRawAmqpMessage().getBody().getSequence(); amqpMessage.setBody(new AmqpSequence(sequenceList)); } else if (brokeredBodyType == AmqpMessageBodyType.VALUE) { amqpMessage.setBody(new AmqpValue(brokeredMessage.getRawAmqpMessage().getBody().getValue())); } if (brokeredMessage.getApplicationProperties() != null) { Map<String, Object> describedTypeMap = convertToDescribedType(brokeredMessage.getApplicationProperties()); amqpMessage.setApplicationProperties(new ApplicationProperties(describedTypeMap)); } if 
(brokeredMessage.getTimeToLive() != null) { amqpMessage.setTtl(brokeredMessage.getTimeToLive().toMillis()); } if (amqpMessage.getProperties() == null) { amqpMessage.setProperties(new Properties()); } amqpMessage.setMessageId(brokeredMessage.getMessageId()); amqpMessage.setContentType(brokeredMessage.getContentType()); amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId()); amqpMessage.setSubject(brokeredMessage.getSubject()); amqpMessage.setReplyTo(brokeredMessage.getReplyTo()); amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId()); amqpMessage.setGroupId(brokeredMessage.getSessionId()); final AmqpMessageProperties brokeredProperties = brokeredMessage.getRawAmqpMessage().getProperties(); amqpMessage.setContentEncoding(brokeredProperties.getContentEncoding()); if (brokeredProperties.getGroupSequence() != null) { amqpMessage.setGroupSequence(brokeredProperties.getGroupSequence()); } amqpMessage.getProperties().setTo(brokeredMessage.getTo()); amqpMessage.getProperties().setUserId(new Binary(brokeredProperties.getUserId())); if (brokeredProperties.getAbsoluteExpiryTime() != null) { amqpMessage.getProperties().setAbsoluteExpiryTime(Date.from(brokeredProperties.getAbsoluteExpiryTime() .toInstant())); } if (brokeredProperties.getCreationTime() != null) { amqpMessage.getProperties().setCreationTime(Date.from(brokeredProperties.getCreationTime().toInstant())); } amqpMessage.setFooter(new Footer(brokeredMessage.getRawAmqpMessage().getFooter())); AmqpMessageHeader header = brokeredMessage.getRawAmqpMessage().getHeader(); if (header.getDeliveryCount() != null) { amqpMessage.setDeliveryCount(header.getDeliveryCount()); } if (header.getPriority() != null) { amqpMessage.setPriority(header.getPriority()); } if (header.isDurable() != null) { amqpMessage.setDurable(header.isDurable()); } if (header.isFirstAcquirer() != null) { amqpMessage.setFirstAcquirer(header.isFirstAcquirer()); } if (header.getTimeToLive() != null) { 
amqpMessage.setTtl(header.getTimeToLive().toMillis()); } final Map<Symbol, Object> messageAnnotationsMap = new HashMap<>(); if (brokeredMessage.getScheduledEnqueueTime() != null) { messageAnnotationsMap.put(Symbol.valueOf(SCHEDULED_ENQUEUE_UTC_TIME_NAME.getValue()), Date.from(brokeredMessage.getScheduledEnqueueTime().toInstant())); } final String partitionKey = brokeredMessage.getPartitionKey(); if (partitionKey != null && !partitionKey.isEmpty()) { messageAnnotationsMap.put(Symbol.valueOf(PARTITION_KEY_ANNOTATION_NAME.getValue()), brokeredMessage.getPartitionKey()); } amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap)); final Map<Symbol, Object> deliveryAnnotationsMap = new HashMap<>(); final Map<String, Object> deliveryAnnotations = brokeredMessage.getRawAmqpMessage() .getDeliveryAnnotations(); for (Map.Entry<String, Object> deliveryEntry : deliveryAnnotations.entrySet()) { deliveryAnnotationsMap.put(Symbol.valueOf(deliveryEntry.getKey()), deliveryEntry.getValue()); } amqpMessage.setDeliveryAnnotations(new DeliveryAnnotations(deliveryAnnotationsMap)); return amqpMessage; } /** * Convert specific type to described type for sending on the wire. * @param propertiesValue application properties set by user which may contain specific type. * @return Map only contains primitive type and described type. */ private static Map<String, Object> convertToDescribedType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof URI) { entry.setValue(new UriDescribedType((URI) value)); } else if (value instanceof OffsetDateTime) { entry.setValue(new OffsetDateTimeDescribedType((OffsetDateTime) value)); } else if (value instanceof Duration) { entry.setValue(new DurationDescribedType((Duration) value)); } } return propertiesValue; } /** * Convert described type to origin type. 
* @param propertiesValue application properties from amqp message may contain described type. * @return Map without described type. */ private static Map<String, Object> convertToOriginType(Map<String, Object> propertiesValue) { for (Map.Entry<String, Object> entry : propertiesValue.entrySet()) { Object value = entry.getValue(); if (value instanceof DescribedType) { entry.setValue(MessageUtils.describedToOrigin((DescribedType) value)); } } return propertiesValue; } @SuppressWarnings("unchecked") @Override public <T> T deserialize(Message message, Class<T> clazz) { Objects.requireNonNull(message, "'message' cannot be null."); Objects.requireNonNull(clazz, "'clazz' cannot be null."); if (clazz == ServiceBusReceivedMessage.class) { return (T) deserializeMessage(message); } else { throw LOGGER.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } @SuppressWarnings("unchecked") @Override public <T> List<T> deserializeList(Message message, Class<T> clazz) { if (clazz == ServiceBusReceivedMessage.class) { return (List<T>) deserializeListOfMessages(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == OffsetDateTime.class) { return (List<T>) deserializeListOfOffsetDateTime(message); } else if (clazz == Long.class) { return (List<T>) deserializeListOfLong(message); } else { throw LOGGER.logExceptionAsError(new IllegalArgumentException( String.format(Messages.CLASS_NOT_A_SUPPORTED_TYPE, clazz))); } } private List<Long> deserializeListOfLong(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.SEQUENCE_NUMBERS); if (expirationListObj instanceof 
long[]) { return Arrays.stream((long[]) expirationListObj) .boxed() .collect(Collectors.toList()); } } } return Collections.emptyList(); } private List<OffsetDateTime> deserializeListOfOffsetDateTime(Message amqpMessage) { if (amqpMessage.getBody() instanceof AmqpValue) { AmqpValue amqpValue = ((AmqpValue) amqpMessage.getBody()); if (amqpValue.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> responseBody = (Map<String, Object>) amqpValue.getValue(); Object expirationListObj = responseBody.get(ManagementConstants.EXPIRATIONS); if (expirationListObj instanceof Date[]) { return Arrays.stream((Date[]) expirationListObj) .map(date -> date.toInstant().atOffset(ZoneOffset.UTC)) .collect(Collectors.toList()); } } } return Collections.emptyList(); } @SuppressWarnings("rawtypes") private List<ServiceBusReceivedMessage> deserializeListOfMessages(Message amqpMessage) { final List<ServiceBusReceivedMessage> messageList = new ArrayList<>(); final AmqpResponseCode statusCode = RequestResponseUtils.getStatusCode(amqpMessage); if (statusCode != AmqpResponseCode.OK) { LOGGER.atWarning() .addKeyValue("statusCode", statusCode) .log("AMQP response did not contain OK status code."); return Collections.emptyList(); } final Object responseBodyMap = ((AmqpValue) amqpMessage.getBody()).getValue(); if (responseBodyMap == null) { LOGGER.warning("AMQP response did not contain a body."); return Collections.emptyList(); } else if (!(responseBodyMap instanceof Map)) { LOGGER.atWarning() .addKeyValue("expectedType", Map.class) .addKeyValue("actualType", responseBodyMap.getClass()) .log("AMQP response body is not correct instance."); return Collections.emptyList(); } final Object messages = ((Map) responseBodyMap).get(ManagementConstants.MESSAGES); if (messages == null) { LOGGER.atWarning().addKeyValue("expectedKey", ManagementConstants.MESSAGES) .log("AMQP response body did not contain key."); return Collections.emptyList(); } else if (!(messages instanceof 
Iterable)) { LOGGER.atWarning() .addKeyValue("expectedType", Iterable.class) .addKeyValue("actualType", messages.getClass()) .log("Response body contents is not the correct type."); return Collections.emptyList(); } for (Object message : (Iterable) messages) { if (!(message instanceof Map)) { LOGGER.atWarning() .addKeyValue("expectedType", Map.class) .addKeyValue("actualType", message.getClass()) .log("Message inside iterable of message is not correct type."); continue; } final Message responseMessage = Message.Factory.create(); final Binary messagePayLoad = (Binary) ((Map) message).get(ManagementConstants.MESSAGE); responseMessage.decode(messagePayLoad.getArray(), messagePayLoad.getArrayOffset(), messagePayLoad.getLength()); final ServiceBusReceivedMessage receivedMessage = deserializeMessage(responseMessage); if (((Map) message).containsKey(ManagementConstants.LOCK_TOKEN_KEY)) { receivedMessage.setLockToken((UUID) ((Map) message).get(ManagementConstants.LOCK_TOKEN_KEY)); } messageList.add(receivedMessage); } return messageList; } private ServiceBusReceivedMessage deserializeMessage(Message amqpMessage) { final Section body = amqpMessage.getBody(); AmqpMessageBody amqpMessageBody; if (body != null) { if (body instanceof Data) { final Binary messageData = ((Data) body).getValue(); amqpMessageBody = AmqpMessageBody.fromData(messageData.getArray()); } else if (body instanceof AmqpValue) { amqpMessageBody = AmqpMessageBody.fromValue(((AmqpValue) body).getValue()); } else if (body instanceof AmqpSequence) { @SuppressWarnings("unchecked") List<Object> messageData = ((AmqpSequence) body).getValue(); amqpMessageBody = AmqpMessageBody.fromSequence(messageData); } else { LOGGER.atWarning() .addKeyValue("actualType", body.getType()) .log("Message body is not correct. Not setting body contents."); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } } else { LOGGER.atWarning() .addKeyValue("actualType", "null") .log("Message body is not correct. 
Not setting body contents."); amqpMessageBody = AmqpMessageBody.fromData(EMPTY_BYTE_ARRAY); } final ServiceBusReceivedMessage brokeredMessage = new ServiceBusReceivedMessage(amqpMessageBody); AmqpAnnotatedMessage brokeredAmqpAnnotatedMessage = brokeredMessage.getRawAmqpMessage(); ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties(); if (applicationProperties != null) { final Map<String, Object> propertiesValue = convertToOriginType(applicationProperties.getValue()); brokeredAmqpAnnotatedMessage.getApplicationProperties().putAll(propertiesValue); } final AmqpMessageHeader brokeredHeader = brokeredAmqpAnnotatedMessage.getHeader(); brokeredHeader.setTimeToLive(Duration.ofMillis(amqpMessage.getTtl())); brokeredHeader.setDeliveryCount(amqpMessage.getDeliveryCount()); brokeredHeader.setDurable(amqpMessage.getHeader().getDurable()); brokeredHeader.setFirstAcquirer(amqpMessage.getHeader().getFirstAcquirer()); brokeredHeader.setPriority(amqpMessage.getPriority()); final Footer footer = amqpMessage.getFooter(); if (footer != null && footer.getValue() != null) { @SuppressWarnings("unchecked") final Map<Symbol, Object> footerValue = footer.getValue(); setValues(footerValue, brokeredAmqpAnnotatedMessage.getFooter()); } final AmqpMessageProperties brokeredProperties = brokeredAmqpAnnotatedMessage.getProperties(); brokeredProperties.setReplyToGroupId(amqpMessage.getReplyToGroupId()); final String replyTo = amqpMessage.getReplyTo(); if (replyTo != null) { brokeredProperties.setReplyTo(new AmqpAddress(amqpMessage.getReplyTo())); } final Object messageId = amqpMessage.getMessageId(); if (messageId != null) { brokeredProperties.setMessageId(new AmqpMessageId(messageId.toString())); } brokeredProperties.setContentType(amqpMessage.getContentType()); final Object correlationId = amqpMessage.getCorrelationId(); if (correlationId != null) { brokeredProperties.setCorrelationId(new AmqpMessageId(correlationId.toString())); } final Properties amqpProperties = 
amqpMessage.getProperties(); if (amqpProperties != null) { final String to = amqpProperties.getTo(); if (to != null) { brokeredProperties.setTo(new AmqpAddress(amqpProperties.getTo())); } if (amqpProperties.getAbsoluteExpiryTime() != null) { brokeredProperties.setAbsoluteExpiryTime(amqpProperties.getAbsoluteExpiryTime().toInstant() .atOffset(ZoneOffset.UTC)); } if (amqpProperties.getCreationTime() != null) { brokeredProperties.setCreationTime(amqpProperties.getCreationTime().toInstant() .atOffset(ZoneOffset.UTC)); } } brokeredProperties.setSubject(amqpMessage.getSubject()); brokeredProperties.setGroupId(amqpMessage.getGroupId()); brokeredProperties.setContentEncoding(amqpMessage.getContentEncoding()); brokeredProperties.setGroupSequence(amqpMessage.getGroupSequence()); brokeredProperties.setUserId(amqpMessage.getUserId()); final DeliveryAnnotations deliveryAnnotations = amqpMessage.getDeliveryAnnotations(); if (deliveryAnnotations != null) { setValues(deliveryAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getDeliveryAnnotations()); } final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations(); if (messageAnnotations != null) { setValues(messageAnnotations.getValue(), brokeredAmqpAnnotatedMessage.getMessageAnnotations()); } if (amqpMessage instanceof MessageWithDeliveryTag) { brokeredMessage.setLockToken(((MessageWithDeliveryTag) amqpMessage).getDeliveryTag()); } else if (amqpMessage instanceof MessageWithLockToken) { brokeredMessage.setLockToken(((MessageWithLockToken) amqpMessage).getLockToken()); } return brokeredMessage; } private static int getPayloadSize(Message msg) { if (msg == null || msg.getBody() == null) { return 0; } final Section bodySection = msg.getBody(); if (bodySection instanceof AmqpValue) { return sizeof(((AmqpValue) bodySection).getValue()); } else if (bodySection instanceof AmqpSequence) { return sizeof(((AmqpSequence) bodySection).getValue()); } else if (bodySection instanceof Data) { final Data payloadSection = 
(Data) bodySection; final Binary payloadBytes = payloadSection.getValue(); return sizeof(payloadBytes); } else { return 0; } } private void setValues(Map<Symbol, Object> sourceMap, Map<String, Object> targetMap) { if (sourceMap != null) { for (Map.Entry<Symbol, Object> entry : sourceMap.entrySet()) { targetMap.put(entry.getKey().toString(), entry.getValue()); } } } @SuppressWarnings("rawtypes") private static int sizeof(Object obj) { if (obj == null) { return 0; } if (obj instanceof String) { return obj.toString().length() << 1; } if (obj instanceof Symbol) { return ((Symbol) obj).length() << 1; } if (obj instanceof Byte || obj instanceof UnsignedByte) { return Byte.BYTES; } if (obj instanceof Integer || obj instanceof UnsignedInteger) { return Integer.BYTES; } if (obj instanceof Long || obj instanceof UnsignedLong || obj instanceof Date) { return Long.BYTES; } if (obj instanceof Short || obj instanceof UnsignedShort) { return Short.BYTES; } if (obj instanceof Boolean) { return 1; } if (obj instanceof Character) { return 4; } if (obj instanceof Float) { return Float.BYTES; } if (obj instanceof Double) { return Double.BYTES; } if (obj instanceof UUID) { return 16; } if (obj instanceof Decimal32) { return 4; } if (obj instanceof Decimal64) { return 8; } if (obj instanceof Decimal128) { return 16; } if (obj instanceof Binary) { return ((Binary) obj).getLength(); } if (obj instanceof Declare) { return 7; } if (obj instanceof Discharge) { Discharge discharge = (Discharge) obj; return 12 + discharge.getTxnId().getLength(); } if (obj instanceof ServiceBusDescribedType) { ServiceBusDescribedType describedType = (ServiceBusDescribedType) obj; return describedType.size(); } if (obj instanceof Map) { int size = 8; Map map = (Map) obj; for (Object value : map.keySet()) { size += sizeof(value); } for (Object value : map.values()) { size += sizeof(value); } return size; } if (obj instanceof Iterable) { int size = 8; for (Object innerObject : (Iterable) obj) { size += 
sizeof(innerObject); } return size; } if (obj.getClass().isArray()) { int size = 8; int length = Array.getLength(obj); for (int i = 0; i < length; i++) { size += sizeof(Array.get(obj, i)); } return size; } throw new IllegalArgumentException(String.format(Locale.US, "Encoding Type: %s is not supported", obj.getClass())); } }
Good thing that older library versions will just ignore the returned `null` value and not add them to the result list. Otherwise, this would have been breaking.
private static CallAutomationEventBase parseSingleCloudEvent(String data, String eventType) { try { CallAutomationEventBase ret = null; ObjectMapper mapper = new ObjectMapper(); mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES); JsonNode eventData = mapper.readTree(data); if (Objects.equals(eventType, "Microsoft.Communication.CallConnected")) { ret = mapper.convertValue(eventData, CallConnected.class); } else if (Objects.equals(eventType, "Microsoft.Communication.CallDisconnected")) { ret = mapper.convertValue(eventData, CallDisconnected.class); } else if (Objects.equals(eventType, "Microsoft.Communication.AddParticipantFailed")) { ret = mapper.convertValue(eventData, AddParticipantFailed.class); } else if (Objects.equals(eventType, "Microsoft.Communication.AddParticipantSucceeded")) { ret = mapper.convertValue(eventData, AddParticipantSucceeded.class); } else if (Objects.equals(eventType, "Microsoft.Communication.CallTransferAccepted")) { ret = mapper.convertValue(eventData, CallTransferAccepted.class); } else if (Objects.equals(eventType, "Microsoft.Communication.CallTransferFailed")) { ret = mapper.convertValue(eventData, CallTransferFailed.class); } else if (Objects.equals(eventType, "Microsoft.Communication.ParticipantsUpdated")) { ret = mapper.convertValue(eventData, ParticipantsUpdated.class); } else if (Objects.equals(eventType, "Microsoft.Communication.RecordingStateChanged")) { ret = mapper.convertValue(eventData, RecordingStateChanged.class); } else if (Objects.equals(eventType, "Microsoft.Communication.PlayStarted")) { ret = mapper.convertValue(eventData, PlayStarted.class); } else if (Objects.equals(eventType, "Microsoft.Communication.PlayCompleted")) { ret = mapper.convertValue(eventData, PlayCompleted.class); } else if (Objects.equals(eventType, "Microsoft.Communication.PlayFailed")) { ret = mapper.convertValue(eventData, PlayFailed.class); } else if (Objects.equals(eventType, "Microsoft.Communication.PlayCanceled")) { ret = 
mapper.convertValue(eventData, PlayCanceled.class); } else if (Objects.equals(eventType, "Microsoft.Communication.RecognizeCompleted")) { ret = mapper.convertValue(eventData, RecognizeCompleted.class); } else if (Objects.equals(eventType, "Microsoft.Communication.RecognizeFailed")) { ret = mapper.convertValue(eventData, RecognizeFailed.class); } else if (Objects.equals(eventType, "Microsoft.Communication.RecognizeCanceled")) { ret = mapper.convertValue(eventData, RecognizeCanceled.class); } else if (Objects.equals(eventType, "Microsoft.Communication.RemoveParticipantFailed")) { ret = mapper.convertValue(eventData, RemoveParticipantFailed.class); } else if (Objects.equals(eventType, "Microsoft.Communication.RemoveParticipantSucceeded")) { ret = mapper.convertValue(eventData, RemoveParticipantSucceeded.class); } else if (Objects.equals(eventType, "Microsoft.Communication.ContinuousDtmfRecognitionToneReceived")) { ret = mapper.convertValue(eventData, ContinuousDtmfRecognitionToneReceived.class); } else if (Objects.equals(eventType, "Microsoft.Communication.ContinuousDtmfRecognitionToneFailed")) { ret = mapper.convertValue(eventData, ContinuousDtmfRecognitionToneFailed.class); } else if (Objects.equals(eventType, "Microsoft.Communication.ContinuousDtmfRecognitionStopped")) { ret = mapper.convertValue(eventData, ContinuousDtmfRecognitionStopped.class); } else if (Objects.equals(eventType, "Microsoft.Communication.SendDtmfTonesCompleted")) { ret = mapper.convertValue(eventData, SendDtmfTonesCompleted.class); } else if (Objects.equals(eventType, "Microsoft.Communication.SendDtmfTonesFailed")) { ret = mapper.convertValue(eventData, SendDtmfTonesFailed.class); } else if (Objects.equals(eventType, "Microsoft.Communication.CancelAddParticipantSucceeded")) { ret = mapper.convertValue(eventData, CancelAddParticipantSucceeded.class); } else if (Objects.equals(eventType, "Microsoft.Communication.CancelAddParticipantFailed")) { ret = mapper.convertValue(eventData, 
CancelAddParticipantFailed.class); } else if (Objects.equals(eventType, "Microsoft.Communication.ConnectFailed")) { ret = mapper.convertValue(eventData, ConnectFailed.class); } else if (Objects.equals(eventType, "Microsoft.Communication.CreateCallFailed")) { ret = mapper.convertValue(eventData, CreateCallFailed.class); } else if (Objects.equals(eventType, "Microsoft.Communication.TranscriptionStarted")) { ret = mapper.convertValue(eventData, TranscriptionStarted.class); } else if (Objects.equals(eventType, "Microsoft.Communication.TranscriptionFailed")) { ret = mapper.convertValue(eventData, TranscriptionFailed.class); } else if (Objects.equals(eventType, "Microsoft.Communication.TranscriptionResumed")) { ret = mapper.convertValue(eventData, TranscriptionResumed.class); } else if (Objects.equals(eventType, "Microsoft.Communication.TranscriptionStopped")) { ret = mapper.convertValue(eventData, TranscriptionStopped.class); } else if (Objects.equals(eventType, "Microsoft.Communication.TranscriptionUpdated")) { ret = mapper.convertValue(eventData, TranscriptionUpdated.class); } else if (Objects.equals(eventType, "Microsoft.Communication.HoldFailed")) { ret = mapper.convertValue(eventData, HoldFailed.class); } else if (Objects.equals(eventType, "Microsoft.Communication.MediaStreamingStarted")) { ret = mapper.convertValue(eventData, MediaStreamingStarted.class); } else if (Objects.equals(eventType, "Microsoft.Communication.MediaStreamingStopped")) { ret = mapper.convertValue(eventData, MediaStreamingStopped.class); } else if (Objects.equals(eventType, "Microsoft.Communication.MediaStreamingFailed")) { ret = mapper.convertValue(eventData, MediaStreamingFailed.class); } return ret; } catch (RuntimeException e) { throw LOGGER.logExceptionAsError(e); } catch (JsonProcessingException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } }
ret = mapper.convertValue(eventData, CreateCallFailed.class);
/**
 * Deserializes a single CloudEvent payload into its strongly-typed call automation event model.
 * <p>
 * Dispatches on the fully qualified event type and delegates to the matching model's
 * {@code fromJson} deserializer. Unrecognized (or null) event types yield {@code null}.
 *
 * @param data the raw JSON payload of the CloudEvent.
 * @param eventType the fully qualified event type, e.g. "Microsoft.Communication.CallConnected".
 * @return the parsed event, or {@code null} when the event type is not recognized.
 * @throws RuntimeException if reading the JSON payload fails.
 */
private static CallAutomationEventBase parseSingleCloudEvent(String data, String eventType) {
    try (JsonReader jsonReader = JsonProviders.createReader(data)) {
        // Guard before the switch: the original Objects.equals(...) chain treated a null
        // event type as "unknown"; switching on null would throw NPE instead.
        if (eventType == null) {
            return null;
        }
        // Flat dispatch table: one case per supported event type.
        switch (eventType) {
            case "Microsoft.Communication.CallConnected":
                return CallConnected.fromJson(jsonReader);
            case "Microsoft.Communication.CallDisconnected":
                return CallDisconnected.fromJson(jsonReader);
            case "Microsoft.Communication.AddParticipantFailed":
                return AddParticipantFailed.fromJson(jsonReader);
            case "Microsoft.Communication.AddParticipantSucceeded":
                return AddParticipantSucceeded.fromJson(jsonReader);
            case "Microsoft.Communication.CallTransferAccepted":
                return CallTransferAccepted.fromJson(jsonReader);
            case "Microsoft.Communication.CallTransferFailed":
                return CallTransferFailed.fromJson(jsonReader);
            case "Microsoft.Communication.ParticipantsUpdated":
                return ParticipantsUpdated.fromJson(jsonReader);
            case "Microsoft.Communication.RecordingStateChanged":
                return RecordingStateChanged.fromJson(jsonReader);
            case "Microsoft.Communication.PlayStarted":
                return PlayStarted.fromJson(jsonReader);
            case "Microsoft.Communication.PlayCompleted":
                return PlayCompleted.fromJson(jsonReader);
            case "Microsoft.Communication.PlayFailed":
                return PlayFailed.fromJson(jsonReader);
            case "Microsoft.Communication.PlayCanceled":
                return PlayCanceled.fromJson(jsonReader);
            case "Microsoft.Communication.RecognizeCompleted":
                return RecognizeCompleted.fromJson(jsonReader);
            case "Microsoft.Communication.RecognizeFailed":
                return RecognizeFailed.fromJson(jsonReader);
            case "Microsoft.Communication.RecognizeCanceled":
                return RecognizeCanceled.fromJson(jsonReader);
            case "Microsoft.Communication.RemoveParticipantFailed":
                return RemoveParticipantFailed.fromJson(jsonReader);
            case "Microsoft.Communication.RemoveParticipantSucceeded":
                return RemoveParticipantSucceeded.fromJson(jsonReader);
            case "Microsoft.Communication.ContinuousDtmfRecognitionToneReceived":
                return ContinuousDtmfRecognitionToneReceived.fromJson(jsonReader);
            case "Microsoft.Communication.ContinuousDtmfRecognitionToneFailed":
                return ContinuousDtmfRecognitionToneFailed.fromJson(jsonReader);
            case "Microsoft.Communication.ContinuousDtmfRecognitionStopped":
                return ContinuousDtmfRecognitionStopped.fromJson(jsonReader);
            case "Microsoft.Communication.SendDtmfTonesCompleted":
                return SendDtmfTonesCompleted.fromJson(jsonReader);
            case "Microsoft.Communication.SendDtmfTonesFailed":
                return SendDtmfTonesFailed.fromJson(jsonReader);
            case "Microsoft.Communication.CancelAddParticipantSucceeded":
                return CancelAddParticipantSucceeded.fromJson(jsonReader);
            case "Microsoft.Communication.CancelAddParticipantFailed":
                return CancelAddParticipantFailed.fromJson(jsonReader);
            case "Microsoft.Communication.ConnectFailed":
                return ConnectFailed.fromJson(jsonReader);
            case "Microsoft.Communication.CreateCallFailed":
                return CreateCallFailed.fromJson(jsonReader);
            case "Microsoft.Communication.TranscriptionStarted":
                return TranscriptionStarted.fromJson(jsonReader);
            case "Microsoft.Communication.TranscriptionFailed":
                return TranscriptionFailed.fromJson(jsonReader);
            case "Microsoft.Communication.TranscriptionResumed":
                return TranscriptionResumed.fromJson(jsonReader);
            case "Microsoft.Communication.TranscriptionStopped":
                return TranscriptionStopped.fromJson(jsonReader);
            case "Microsoft.Communication.TranscriptionUpdated":
                return TranscriptionUpdated.fromJson(jsonReader);
            case "Microsoft.Communication.HoldFailed":
                return HoldFailed.fromJson(jsonReader);
            case "Microsoft.Communication.MediaStreamingStarted":
                return MediaStreamingStarted.fromJson(jsonReader);
            case "Microsoft.Communication.MediaStreamingStopped":
                return MediaStreamingStopped.fromJson(jsonReader);
            case "Microsoft.Communication.MediaStreamingFailed":
                return MediaStreamingFailed.fromJson(jsonReader);
            default:
                return null;
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
/** Translates Call Automation webhook request bodies into strongly-typed event models. */
class CallAutomationEventParser {
    private static final ClientLogger LOGGER = new ClientLogger(CallAutomationEventParser.class);

    /***
     * Returns a list of events from request's body.
     *
     * @param requestBody Body of the event request.
     * @throws RuntimeException Any exceptions occurs at runtime.
     * @return a list of CallAutomationEventBase
     */
    public static List<CallAutomationEventBase> parseEvents(String requestBody) {
        return parseCloudEventList(requestBody);
    }

    // Deserializes the CloudEvent envelope list and converts each entry to its event model.
    private static List<CallAutomationEventBase> parseCloudEventList(String requestBody) {
        try {
            List<CallAutomationEventBase> parsedEvents = new ArrayList<>();
            List<CloudEvent> cloudEvents;
            try {
                cloudEvents = CloudEvent.fromString(requestBody);
            } catch (RuntimeException e) {
                // A malformed envelope is tolerated: report "no events" rather than failing.
                return parsedEvents;
            }
            for (CloudEvent cloudEvent : cloudEvents) {
                CallAutomationEventBase parsed
                    = parseSingleCloudEvent(cloudEvent.getData().toString(), cloudEvent.getType());
                // Unrecognized event types come back null and are skipped.
                if (parsed != null) {
                    parsedEvents.add(parsed);
                }
            }
            return parsedEvents;
        } catch (RuntimeException e) {
            // Log-and-rethrow keeps parsing failures visible in client logs.
            throw LOGGER.logExceptionAsError(e);
        }
    }
}
// Parses Call Automation webhook payloads (CloudEvent envelopes) into strongly-typed events.
class CallAutomationEventParser {
    private static final ClientLogger LOGGER = new ClientLogger(CallAutomationEventParser.class);

    /***
     * Returns a list of events from request's body.
     *
     * @param requestBody Body of the event request.
     * @throws RuntimeException Any exceptions occurs at runtime.
     * @return a list of CallAutomationEventBase
     */
    public static List<CallAutomationEventBase> parseEvents(String requestBody) {
        List<CallAutomationEventBase> callAutomationBaseEvents;
        callAutomationBaseEvents = parseCloudEventList(requestBody);
        return callAutomationBaseEvents;
    }

    // Deserializes the CloudEvent envelope list and converts each entry to its event model.
    private static List<CallAutomationEventBase> parseCloudEventList(String requestBody) {
        try {
            List<CloudEvent> cloudEvents;
            List<CallAutomationEventBase> callAutomationBaseEvents = new ArrayList<>();
            try {
                cloudEvents = CloudEvent.fromString(requestBody);
            } catch (RuntimeException e) {
                // Malformed envelope is tolerated: return an empty list instead of propagating.
                return callAutomationBaseEvents;
            }
            for (CloudEvent cloudEvent : cloudEvents) {
                CallAutomationEventBase temp
                    = parseSingleCloudEvent(cloudEvent.getData().toString(), cloudEvent.getType());
                // Unrecognized event types come back null and are skipped.
                if (temp != null) {
                    callAutomationBaseEvents.add(temp);
                }
            }
            return callAutomationBaseEvents;
        } catch (RuntimeException e) {
            // Log-and-rethrow keeps parsing failures visible in client logs.
            throw LOGGER.logExceptionAsError(e);
        }
    }
}
```suggestion if (!connectionString.contains("InstrumentationKey=")) { ```
/**
 * Customizes the auto-configured OpenTelemetry SDK to export telemetry to Azure Monitor.
 * <p>
 * The customizer disables itself when the Application Insights Java agent is attached
 * (non-native runtime), disables exporting when no connection string is configured, and
 * otherwise installs the Azure Monitor exporter on the SDK builder.
 *
 * @return the {@link AutoConfigurationCustomizerProvider} applied during SDK auto-configuration.
 * @throws WrongConnectionStringException if a connection string is present but contains no
 * "InstrumentationKey=" key-value pair.
 */
AutoConfigurationCustomizerProvider autoConfigurationCustomizerProvider() {
    return new AutoConfigurationCustomizerProvider() {
        @Override
        public void customize(AutoConfigurationCustomizer autoConfigurationCustomizer) {
            if (!isNativeRuntimeExecution() && applicationInsightsAgentIsAttached()) {
                LOG.warn("The spring-cloud-azure-starter-monitor Spring starter is disabled because the Application Insights Java agent is enabled."
                    + " You can remove this message by adding the otel.sdk.disabled=true property.");
                autoConfigurationCustomizer.addPropertiesCustomizer(OTEL_DISABLE_CONFIG);
                return;
            }
            if (connectionString == null || connectionString.isEmpty()) {
                LOG.warn("Unable to find the Application Insights connection string. The telemetry data won't be sent to Azure.");
                autoConfigurationCustomizer.addPropertiesCustomizer(NO_EXPORT_CONFIG);
                return;
            }
            // FIX: a connection string is a list of key=value pairs and is not required to
            // begin with "InstrumentationKey=" (it may start with e.g. "IngestionEndpoint=").
            // Use contains(...) instead of startsWith(...) so valid strings are not rejected.
            if (!connectionString.contains("InstrumentationKey=")) {
                throw new WrongConnectionStringException();
            }
            if (autoConfigurationCustomizer instanceof AutoConfiguredOpenTelemetrySdkBuilder) {
                AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder
                    = (AutoConfiguredOpenTelemetrySdkBuilder) autoConfigurationCustomizer;
                AzureMonitorExporterBuilder azureMonitorExporterBuilder
                    = new AzureMonitorExporterBuilder().connectionString(connectionString);
                // Prefer a user-provided HttpPipeline (e.g. for proxies or testing) when present.
                HttpPipeline providedHttpPipeline = httpPipeline.getIfAvailable();
                if (providedHttpPipeline != null) {
                    azureMonitorExporterBuilder = azureMonitorExporterBuilder.httpPipeline(providedHttpPipeline);
                }
                azureMonitorExporterBuilder.install(sdkBuilder);
            }
        }
    };
}
if (!connectionString.startsWith("InstrumentationKey=")) {
/**
 * Customizes the auto-configured OpenTelemetry SDK to export telemetry to Azure Monitor.
 * <p>
 * Disables itself when the Application Insights Java agent is attached (non-native runtime),
 * disables exporting when no connection string is configured, and otherwise installs the
 * Azure Monitor exporter on the SDK builder.
 *
 * @return the {@link AutoConfigurationCustomizerProvider} applied during SDK auto-configuration.
 */
AutoConfigurationCustomizerProvider autoConfigurationCustomizerProvider() {
    return new AutoConfigurationCustomizerProvider() {
        @Override
        public void customize(AutoConfigurationCustomizer autoConfigurationCustomizer) {
            // The Java agent already exports telemetry; running both would double-report.
            if (!isNativeRuntimeExecution() && applicationInsightsAgentIsAttached()) {
                LOG.warn("The spring-cloud-azure-starter-monitor Spring starter is disabled because the Application Insights Java agent is enabled."
                    + " You can remove this message by adding the otel.sdk.disabled=true property.");
                autoConfigurationCustomizer.addPropertiesCustomizer(OTEL_DISABLE_CONFIG);
                return;
            }
            // No connection string: keep the SDK alive but export nothing.
            if (connectionString == null || connectionString.isEmpty()) {
                LOG.warn("Unable to find the Application Insights connection string. The telemetry data won't be sent to Azure.");
                autoConfigurationCustomizer.addPropertiesCustomizer(NO_EXPORT_CONFIG);
                return;
            }
            // contains(...) rather than startsWith(...): key=value pairs may appear in any order.
            if (!connectionString.contains("InstrumentationKey=")) {
                throw new WrongConnectionStringException();
            }
            if (autoConfigurationCustomizer instanceof AutoConfiguredOpenTelemetrySdkBuilder) {
                AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder
                    = (AutoConfiguredOpenTelemetrySdkBuilder) autoConfigurationCustomizer;
                AzureMonitorExporterBuilder azureMonitorExporterBuilder
                    = new AzureMonitorExporterBuilder().connectionString(connectionString);
                // Prefer a user-provided HttpPipeline (e.g. for proxies or testing) when present.
                HttpPipeline providedHttpPipeline = httpPipeline.getIfAvailable();
                if (providedHttpPipeline != null) {
                    azureMonitorExporterBuilder = azureMonitorExporterBuilder.httpPipeline(providedHttpPipeline);
                }
                azureMonitorExporterBuilder.install(sdkBuilder);
            }
        }
    };
}
class AzureSpringMonitorAutoConfig { private static final Logger LOG = LoggerFactory.getLogger(AzureSpringMonitorAutoConfig.class); private static final Function<ConfigProperties, Map<String, String>> OTEL_DISABLE_CONFIG = configProperties -> { Map<String, String> properties = new HashMap<>(); properties.put("otel.sdk.disabled", "true"); return properties; }; private static final Function<ConfigProperties, Map<String, String>> NO_EXPORT_CONFIG = configProperties -> { Map<String, String> properties = new HashMap<>(3); properties.put("otel.traces.exporter", "none"); properties.put("otel.metrics.exporter", "none"); properties.put("otel.logs.exporter", "none"); return properties; }; private final String connectionString; private final ObjectProvider<HttpPipeline> httpPipeline; /** * Create an instance of AzureSpringMonitorConfig * * @param connectionString connection string system property * @param httpPipeline an instance of HttpPipeline */ public AzureSpringMonitorAutoConfig(@Value("${applicationinsights.connection.string:}") String connectionString, ObjectProvider<HttpPipeline> httpPipeline) { this.connectionString = connectionString; this.httpPipeline = httpPipeline; if (!isNativeRuntimeExecution()) { LOG.warn("You are using Application Insights for Spring in a non-native GraalVM runtime environment. 
We recommend using the Application Insights Java agent."); } } private static boolean isNativeRuntimeExecution() { String imageCode = System.getProperty("org.graalvm.nativeimage.imagecode"); return imageCode != null; } private static boolean applicationInsightsAgentIsAttached() { try { Class.forName("com.microsoft.applicationinsights.agent.Agent", false, null); return true; } catch (ClassNotFoundException e) { return false; } } @Bean /** * Declare OpenTelemetryVersionCheckRunner bean to check the OpenTelemetry version * * @return OpenTelemetryVersionCheckRunner */ @Bean public OpenTelemetryVersionCheckRunner openTelemetryVersionCheckRunner() { return new OpenTelemetryVersionCheckRunner(); } }
class AzureSpringMonitorAutoConfig { private static final Logger LOG = LoggerFactory.getLogger(AzureSpringMonitorAutoConfig.class); private static final Function<ConfigProperties, Map<String, String>> OTEL_DISABLE_CONFIG = configProperties -> { Map<String, String> properties = new HashMap<>(); properties.put("otel.sdk.disabled", "true"); return properties; }; private static final Function<ConfigProperties, Map<String, String>> NO_EXPORT_CONFIG = configProperties -> { Map<String, String> properties = new HashMap<>(3); properties.put("otel.traces.exporter", "none"); properties.put("otel.metrics.exporter", "none"); properties.put("otel.logs.exporter", "none"); return properties; }; private final String connectionString; private final ObjectProvider<HttpPipeline> httpPipeline; /** * Create an instance of AzureSpringMonitorConfig * * @param connectionString connection string system property * @param httpPipeline an instance of HttpPipeline */ public AzureSpringMonitorAutoConfig(@Value("${applicationinsights.connection.string:}") String connectionString, ObjectProvider<HttpPipeline> httpPipeline) { this.connectionString = connectionString; this.httpPipeline = httpPipeline; if (!isNativeRuntimeExecution()) { LOG.warn("You are using Application Insights for Spring in a non-native GraalVM runtime environment. 
We recommend using the Application Insights Java agent."); } } private static boolean isNativeRuntimeExecution() { String imageCode = System.getProperty("org.graalvm.nativeimage.imagecode"); return imageCode != null; } private static boolean applicationInsightsAgentIsAttached() { try { Class.forName("com.microsoft.applicationinsights.agent.Agent", false, null); return true; } catch (ClassNotFoundException e) { return false; } } @Bean /** * Declare OpenTelemetryVersionCheckRunner bean to check the OpenTelemetry version * * @return OpenTelemetryVersionCheckRunner */ @Bean public OpenTelemetryVersionCheckRunner openTelemetryVersionCheckRunner() { return new OpenTelemetryVersionCheckRunner(); } }
I think in the longer run it should be fine to make a breaking change and remove the older, pre-test-proxy APIs, now that the test-proxy migration is done.
/**
 * Disposes of resources used by this InterceptorManager.
 * <p>
 * In RECORD mode the captured session is flushed: either the test proxy recording is
 * stopped (proxy mode) or the in-memory recording is written to the session-record JSON
 * file. In PLAYBACK mode with the test proxy enabled, the playback session is stopped.
 */
public void close() {
    if (allowedToRecordValues) {
        if (testProxyEnabled) {
            // Proxy mode: the proxy owns the record file; hand over the variables and stop.
            testProxyRecordPolicy.stopRecording(proxyVariableQueue);
            return;
        }
        // Local mode: persist the in-memory session record to session-records/<name>.json.
        try (BufferedWriter fileWriter = Files.newBufferedWriter(createRecordFile(playbackRecordName).toPath());
            JsonWriter recordWriter = JsonProviders.createWriter(fileWriter)) {
            recordWriter.writeJson(recordedData).flush();
        } catch (IOException ioException) {
            throw LOGGER.logExceptionAsError(
                new UncheckedIOException("Unable to write data to playback file.", ioException));
        }
        return;
    }
    if (isPlaybackMode() && testProxyEnabled && allowedToReadRecordedValues) {
        testProxyPlaybackClient.stopPlayback();
    }
}
try (BufferedWriter writer = Files.newBufferedWriter(createRecordFile(playbackRecordName).toPath());
/**
 * Disposes of resources used by this InterceptorManager.
 * <p>
 * In RECORD mode the captured session is flushed: either the test proxy recording is
 * stopped (proxy mode) or the in-memory recording is written to the session-record JSON
 * file. In PLAYBACK mode with the test proxy enabled, the playback session is stopped.
 */
public void close() {
    if (allowedToRecordValues) {
        if (testProxyEnabled) {
            // Proxy mode: the proxy owns the record file; hand it the variables and stop.
            testProxyRecordPolicy.stopRecording(proxyVariableQueue);
        } else {
            // Local mode: persist the in-memory recording to session-records/<name>.json.
            try (BufferedWriter writer = Files.newBufferedWriter(createRecordFile(playbackRecordName).toPath());
                JsonWriter jsonWriter = JsonProviders.createWriter(writer)) {
                jsonWriter.writeJson(recordedData).flush();
            } catch (IOException ex) {
                throw LOGGER
                    .logExceptionAsError(new UncheckedIOException("Unable to write data to playback file.", ex));
            }
        }
    } else if (isPlaybackMode() && testProxyEnabled && allowedToReadRecordedValues) {
        testProxyPlaybackClient.stopPlayback();
    }
}
/**
 * Manages recording and playback of test network calls, either through the external test proxy
 * or through locally persisted {@link RecordedData} session records stored under
 * "session-records/{playbackRecordName}.json".
 * <p>
 * FIX: the class declares {@code implements AutoCloseable} and carried the Javadoc and
 * {@code @Override} for {@code close()}, but the method body itself was missing (the stray
 * {@code @Override} sat directly on the private {@code readDataFromFile()} method, which does
 * not compile). The {@code close()} implementation has been restored in place.
 */
class InterceptorManager implements AutoCloseable {
    private static final ClientLogger LOGGER = new ClientLogger(InterceptorManager.class);

    private final Map<String, String> textReplacementRules;
    private final String testName;
    private final String playbackRecordName;
    private final TestMode testMode;
    // True when recorded values may be read: PLAYBACK mode and not marked do-not-record.
    private final boolean allowedToReadRecordedValues;
    // True when network calls may be recorded: RECORD mode and not marked do-not-record.
    private final boolean allowedToRecordValues;
    private final RecordedData recordedData;
    private final boolean testProxyEnabled;
    private final boolean skipRecordingRequestBody;
    private TestProxyRecordPolicy testProxyRecordPolicy;
    private TestProxyPlaybackClient testProxyPlaybackClient;
    private final Queue<String> proxyVariableQueue = new LinkedList<>();
    private HttpClient httpClient;
    private final Path testClassPath;
    private String xRecordingFileLocation;

    /**
     * Creates a new InterceptorManager that either replays test-session records or saves them.
     * <p>
     * The test session records are persisted in the path: "<i>session-records/{@code testName}.json</i>"
     *
     * @param testName Name of the test session record.
     * @param testMode The {@link TestMode} for this interceptor.
     * @throws UncheckedIOException If an existing record could not be located or deserialized
     * into an instance of {@link RecordedData}.
     * @throws NullPointerException If {@code testName} is {@code null}.
     * @deprecated Use {@link #InterceptorManager(TestContextManager)} instead.
     */
    @Deprecated
    public InterceptorManager(String testName, TestMode testMode) {
        this(testName, testName, testMode, false, false, false, null);
    }

    /**
     * Creates a new InterceptorManager that either replays test-session records or saves them.
     *
     * @param testContextManager Contextual information about the test being ran, such as test
     * name, {@link TestMode}, and others.
     * @throws UncheckedIOException If an existing record could not be located or deserialized
     * into an instance of {@link RecordedData}.
     * @throws NullPointerException If the test name is {@code null}.
     */
    public InterceptorManager(TestContextManager testContextManager) {
        this(testContextManager.getTestName(), testContextManager.getTestPlaybackRecordingName(),
            testContextManager.getTestMode(), testContextManager.doNotRecordTest(),
            testContextManager.isTestProxyEnabled(), testContextManager.skipRecordingRequestBody(),
            testContextManager.getTestClassPath());
    }

    private InterceptorManager(String testName, String playbackRecordName, TestMode testMode, boolean doNotRecord,
        boolean enableTestProxy, boolean skipRecordingRequestBody, Path testClassPath) {
        this.testProxyEnabled = enableTestProxy;
        Objects.requireNonNull(testName, "'testName' cannot be null.");
        this.testName = testName;
        this.playbackRecordName = CoreUtils.isNullOrEmpty(playbackRecordName) ? testName : playbackRecordName;
        this.testMode = testMode;
        this.textReplacementRules = new HashMap<>();
        this.skipRecordingRequestBody = skipRecordingRequestBody;
        this.testClassPath = testClassPath;
        this.allowedToReadRecordedValues = (testMode == TestMode.PLAYBACK && !doNotRecord);
        this.allowedToRecordValues = (testMode == TestMode.RECORD && !doNotRecord);
        // Only the local (non-proxy) flow keeps a session record in memory; proxy mode leaves it null.
        if (!enableTestProxy && allowedToReadRecordedValues) {
            this.recordedData = readDataFromFile();
        } else if (!enableTestProxy && allowedToRecordValues) {
            this.recordedData = new RecordedData();
        } else {
            this.recordedData = null;
        }
    }

    /**
     * Creates a new InterceptorManager that replays test session records, applying
     * {@code textReplacementRules} via {@link PlaybackClient} while playing back network calls.
     *
     * @param testName Name of the test session record.
     * @param textReplacementRules A set of rules to replace text when playing back network calls.
     * @throws UncheckedIOException An existing record could not be located or deserialized.
     * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}.
     * @deprecated Use {@link #InterceptorManager(TestContextManager)} instead.
     */
    @Deprecated
    public InterceptorManager(String testName, Map<String, String> textReplacementRules) {
        this(testName, textReplacementRules, false, testName);
    }

    /**
     * Creates a new InterceptorManager that replays test session records, applying
     * {@code textReplacementRules} via {@link PlaybackClient} while playing back network calls.
     *
     * @param testName Name of the test session record.
     * @param textReplacementRules A set of rules to replace text when playing back network calls.
     * @param doNotRecord Flag indicating whether network calls should be record or played back.
     * @throws UncheckedIOException An existing record could not be located or deserialized.
     * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}.
     * @deprecated Use {@link #InterceptorManager(TestContextManager)} instead.
     */
    @Deprecated
    public InterceptorManager(String testName, Map<String, String> textReplacementRules, boolean doNotRecord) {
        this(testName, textReplacementRules, doNotRecord, testName);
    }

    /**
     * Creates a new InterceptorManager that replays test session records, applying
     * {@code textReplacementRules} via {@link PlaybackClient} while playing back network calls.
     *
     * @param testName Name of the test.
     * @param textReplacementRules A set of rules to replace text when playing back network calls.
     * @param doNotRecord Flag indicating whether network calls should be record or played back.
     * @param playbackRecordName Full name of the test including its iteration, used as the playback record name.
     * @throws UncheckedIOException An existing record could not be located or deserialized.
     * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}.
     */
    public InterceptorManager(String testName, Map<String, String> textReplacementRules, boolean doNotRecord,
        String playbackRecordName) {
        Objects.requireNonNull(testName, "'testName' cannot be null.");
        Objects.requireNonNull(textReplacementRules, "'textReplacementRules' cannot be null.");
        this.testName = testName;
        this.playbackRecordName = CoreUtils.isNullOrEmpty(playbackRecordName) ? testName : playbackRecordName;
        this.testMode = TestMode.PLAYBACK;
        this.allowedToReadRecordedValues = !doNotRecord;
        this.allowedToRecordValues = false;
        this.testProxyEnabled = false;
        this.skipRecordingRequestBody = false;
        this.testClassPath = null;
        this.recordedData = allowedToReadRecordedValues ? readDataFromFile() : null;
        this.textReplacementRules = textReplacementRules;
    }

    /**
     * Gets whether this InterceptorManager is in playback mode.
     *
     * @return true if the InterceptorManager is in playback mode and false otherwise.
     */
    public boolean isPlaybackMode() {
        return testMode == TestMode.PLAYBACK;
    }

    /**
     * Gets whether this InterceptorManager is in live mode.
     *
     * @return true if the InterceptorManager is in live mode and false otherwise.
     */
    public boolean isLiveMode() {
        return testMode == TestMode.LIVE;
    }

    /**
     * Gets whether this InterceptorManager is in record mode.
     *
     * @return true if the InterceptorManager is in record mode and false otherwise.
     */
    public boolean isRecordMode() {
        return testMode == TestMode.RECORD;
    }

    /**
     * Gets the recorded data InterceptorManager is keeping track of.
     *
     * @return The recorded data managed by InterceptorManager.
     */
    public RecordedData getRecordedData() {
        return recordedData;
    }

    /**
     * A {@link Supplier} for retrieving a variable from a test proxy recording.
     *
     * @return The supplier for retrieving a variable.
     */
    public Supplier<String> getProxyVariableSupplier() {
        return () -> {
            Objects.requireNonNull(this.testProxyPlaybackClient, "Playback must be started to retrieve values");
            if (!CoreUtils.isNullOrEmpty(proxyVariableQueue)) {
                return proxyVariableQueue.remove();
            } else {
                throw LOGGER.logExceptionAsError(new RuntimeException("'proxyVariableQueue' cannot be null or empty."));
            }
        };
    }

    /**
     * Get a {@link Consumer} for adding variables used in test proxy tests.
     *
     * @return The consumer for adding a variable.
     */
    public Consumer<String> getProxyVariableConsumer() {
        return proxyVariableQueue::add;
    }

    /**
     * Gets a new HTTP pipeline policy that records network calls and its data is managed by
     * {@link InterceptorManager}.
     *
     * @return HttpPipelinePolicy to record network calls.
     * @throws IllegalStateException A recording policy was requested when the test proxy is
     * enabled and test mode is not RECORD.
     */
    public HttpPipelinePolicy getRecordPolicy() {
        if (testProxyEnabled) {
            return getProxyRecordingPolicy();
        }
        return getRecordPolicy(Collections.emptyList());
    }

    /**
     * Gets a new HTTP pipeline policy that records network calls. The recorded content is
     * redacted by the given list of redactor functions to hide sensitive information.
     *
     * @param recordingRedactors The custom redactor functions that are applied in addition to
     * the default redactor functions defined in {@link RecordingRedactor}.
     * @return {@link HttpPipelinePolicy} to record network calls.
     * @throws IllegalStateException A recording policy was requested when the test proxy is
     * enabled and test mode is not RECORD.
     */
    public HttpPipelinePolicy getRecordPolicy(List<Function<String, String>> recordingRedactors) {
        if (testProxyEnabled) {
            return getProxyRecordingPolicy();
        }
        return new RecordNetworkCallPolicy(recordedData, recordingRedactors);
    }

    /**
     * Gets a new HTTP client that plays back test session records managed by
     * {@link InterceptorManager}.
     *
     * @return An HTTP client that plays back network calls from its recorded data.
     * @throws IllegalStateException A playback client was requested when the test proxy is
     * enabled and test mode is LIVE.
     */
    public HttpClient getPlaybackClient() {
        if (testProxyEnabled) {
            if (!isPlaybackMode()) {
                throw new IllegalStateException("A playback client can only be requested in PLAYBACK mode.");
            }
            if (testProxyPlaybackClient == null) {
                testProxyPlaybackClient = new TestProxyPlaybackClient(httpClient, skipRecordingRequestBody);
                proxyVariableQueue
                    .addAll(testProxyPlaybackClient.startPlayback(getTestProxyRecordFile(), testClassPath));
                xRecordingFileLocation = testProxyPlaybackClient.getRecordingFileLocation();
            }
            return testProxyPlaybackClient;
        } else {
            return new PlaybackClient(recordedData, textReplacementRules);
        }
    }

    /**
     * Disposes of resources used by this InterceptorManager.
     * <p>
     * In RECORD mode the captured session is flushed to
     * "<i>session-records/{@code playbackRecordName}.json</i>" (or the proxy recording is
     * stopped); in PLAYBACK mode with the test proxy enabled, playback is stopped.
     */
    @Override
    public void close() {
        if (allowedToRecordValues) {
            if (testProxyEnabled) {
                testProxyRecordPolicy.stopRecording(proxyVariableQueue);
            } else {
                try (BufferedWriter writer = Files.newBufferedWriter(createRecordFile(playbackRecordName).toPath());
                    JsonWriter jsonWriter = JsonProviders.createWriter(writer)) {
                    jsonWriter.writeJson(recordedData).flush();
                } catch (IOException ex) {
                    throw LOGGER
                        .logExceptionAsError(new UncheckedIOException("Unable to write data to playback file.", ex));
                }
            }
        } else if (isPlaybackMode() && testProxyEnabled && allowedToReadRecordedValues) {
            testProxyPlaybackClient.stopPlayback();
        }
    }

    // Loads the persisted session record for local (non-proxy) playback.
    private RecordedData readDataFromFile() {
        File recordFile = getRecordFile();
        try (BufferedReader reader = Files.newBufferedReader(recordFile.toPath());
            JsonReader jsonReader = JsonProviders.createReader(reader)) {
            return RecordedData.fromJson(jsonReader);
        } catch (IOException ex) {
            throw LOGGER.logExceptionAsWarning(new UncheckedIOException(ex));
        }
    }

    // Lazily creates the proxy recording policy; only legal in RECORD mode.
    private HttpPipelinePolicy getProxyRecordingPolicy() {
        if (testProxyRecordPolicy == null) {
            if (!isRecordMode()) {
                throw new IllegalStateException("A recording policy can only be requested in RECORD mode.");
            }
            testProxyRecordPolicy = new TestProxyRecordPolicy(httpClient, skipRecordingRequestBody);
            testProxyRecordPolicy.startRecording(getTestProxyRecordFile(), testClassPath);
        }
        return testProxyRecordPolicy;
    }

    /**
     * Computes the relative path of the record file to the repo root.
     *
     * @return A {@link File} with the partial path to where the record file lives.
     */
    private File getTestProxyRecordFile() {
        Path repoRoot = TestUtils.getRepoRootResolveUntil(testClassPath, "eng");
        Path targetFolderRoot = TestUtils.getRepoRootResolveUntil(testClassPath, "target");
        Path filePath = Paths.get(targetFolderRoot.toString(), "src/test/resources/session-records",
            playbackRecordName + ".json");
        return repoRoot.relativize(filePath).toFile();
    }

    /*
     * Attempts to retrieve the playback file, if it is not found an exception is thrown as
     * playback can't continue. Falls back to the legacy (testName-based) file name.
     */
    private File getRecordFile() {
        File recordFolder = TestUtils.getRecordFolder();
        File playbackFile = new File(recordFolder, playbackRecordName + ".json");
        File oldPlaybackFile = new File(recordFolder, testName + ".json");
        if (!playbackFile.exists() && !oldPlaybackFile.exists()) {
            throw LOGGER.logExceptionAsError(
                new RuntimeException(String.format("Missing both new and old playback files. Files are %s and %s.",
                    playbackFile.getPath(), oldPlaybackFile.getPath())));
        }
        if (playbackFile.exists()) {
            LOGGER.info("==> Playback file path: {}", playbackFile.getPath());
            return playbackFile;
        } else {
            LOGGER.info("==> Playback file path: {}", oldPlaybackFile.getPath());
            return oldPlaybackFile;
        }
    }

    /*
     * Retrieves or creates the file that will be used to store the recorded test values.
     */
    private File createRecordFile(String testName) throws IOException {
        File recordFolder = TestUtils.getRecordFolder();
        if (!recordFolder.exists()) {
            if (recordFolder.mkdir()) {
                LOGGER.verbose("Created directory: {}", recordFolder.getPath());
            }
        }
        File recordFile = new File(recordFolder, testName + ".json");
        if (recordFile.createNewFile()) {
            LOGGER.verbose("Created record file: {}", recordFile.getPath());
        }
        LOGGER.info("==> Playback file path: " + recordFile);
        return recordFile;
    }

    /**
     * Add text replacement rule (regex as key, the replacement text as value) into
     * {@link InterceptorManager#textReplacementRules}.
     *
     * @param regex the pattern to locate the position of replacement
     * @param replacement the replacement text
     */
    public void addTextReplacementRule(String regex, String replacement) {
        textReplacementRules.put(regex, replacement);
    }

    /**
     * Add sanitizer rule for sanitization during record or playback.
     *
     * @param testProxySanitizers the list of replacement regex and rules.
     * @throws RuntimeException Neither playback or record has started.
     */
    public void addSanitizers(List<TestProxySanitizer> testProxySanitizers) {
        if (CoreUtils.isNullOrEmpty(testProxySanitizers)) {
            return;
        }
        if (testProxyPlaybackClient != null) {
            testProxyPlaybackClient.addProxySanitization(testProxySanitizers);
        } else if (testProxyRecordPolicy != null) {
            testProxyRecordPolicy.addProxySanitization(testProxySanitizers);
        } else {
            throw new RuntimeException("Playback or record must have been started before adding sanitizers.");
        }
    }

    /**
     * Disable common sanitizer rule for sanitization during record or playback.
     *
     * @param testProxySanitizersId the list of sanitizer rule Id to disable.
     * @throws RuntimeException Neither playback or record has started.
     */
    public void removeSanitizers(String... testProxySanitizersId) {
        if (CoreUtils.isNullOrEmpty(testProxySanitizersId)) {
            return;
        }
        if (testProxyPlaybackClient != null) {
            testProxyPlaybackClient.removeProxySanitization(Arrays.asList(testProxySanitizersId));
        } else if (testProxyRecordPolicy != null) {
            testProxyRecordPolicy.removeProxySanitization(Arrays.asList(testProxySanitizersId));
        } else {
            throw new RuntimeException("Playback or record must have been started before removing sanitizers.");
        }
    }

    /**
     * Add sanitizer rule for sanitization during record or playback.
     *
     * @param testProxySanitizers the list of replacement regex and rules.
     */
    public void addSanitizers(TestProxySanitizer... testProxySanitizers) {
        if (testProxySanitizers != null) {
            addSanitizers(Arrays.asList(testProxySanitizers));
        }
    }

    /**
     * Add matcher rules to match recorded data in playback.
     * Matchers are only applied for playback session and so this will be a noop when invoked
     * in RECORD/LIVE mode.
     *
     * @param testProxyMatchers the list of matcher rules when playing back recorded data.
     * @throws RuntimeException Playback has not started.
     */
    public void addMatchers(List<TestProxyRequestMatcher> testProxyMatchers) {
        if (CoreUtils.isNullOrEmpty(testProxyMatchers)) {
            return;
        }
        if (testMode != TestMode.PLAYBACK) {
            return;
        }
        if (testProxyPlaybackClient != null) {
            testProxyPlaybackClient.addMatcherRequests(testProxyMatchers);
        } else {
            throw new RuntimeException("Playback must have been started before adding matchers.");
        }
    }

    /**
     * Add matcher rules to match recorded data in playback.
     * Matchers are only applied for playback session and so this will be a noop when invoked
     * in RECORD/LIVE mode.
     *
     * @param testProxyRequestMatchers the list of matcher rules when playing back recorded data.
     */
    public void addMatchers(TestProxyRequestMatcher... testProxyRequestMatchers) {
        if (testProxyRequestMatchers != null) {
            addMatchers(Arrays.asList(testProxyRequestMatchers));
        }
    }

    /**
     * Get the recording file location in assets repo.
     *
     * @return the assets repo location of the recording file.
     */
    public String getRecordingFileLocation() {
        return xRecordingFileLocation;
    }

    /**
     * Sets the httpClient to be used for this test.
     *
     * @param httpClient The {@link HttpClient} implementation to use.
     */
    void setHttpClient(HttpClient httpClient) {
        this.httpClient = httpClient;
    }

    /**
     * Sets the recording options for the proxy.
     *
     * @param testProxyRecordingOptions The {@link TestProxyRecordingOptions} to use.
     * @throws RuntimeException if test mode is not record.
     */
    public void setProxyRecordingOptions(TestProxyRecordingOptions testProxyRecordingOptions) {
        if (testMode != TestMode.RECORD) {
            return;
        }
        if (testProxyRecordPolicy != null) {
            testProxyRecordPolicy.setRecordingOptions(testProxyRecordingOptions);
        } else {
            throw new RuntimeException("Recording must have been started before setting recording options.");
        }
    }

    /**
     * Gets the name of the running test.
     *
     * @return Name of the running test.
     */
    public String getTestName() {
        return testName;
    }

    /**
     * Gets the name of the playback record.
     * <p>
     * The playback record name is equivalent to {@code <testClass>.<testMethod>[<testIteration>]}.
     *
     * @return Name of the playback record.
     */
    public String getPlaybackRecordName() {
        return playbackRecordName;
    }
}
class InterceptorManager implements AutoCloseable { private static final ClientLogger LOGGER = new ClientLogger(InterceptorManager.class); private final Map<String, String> textReplacementRules; private final String testName; private final String playbackRecordName; private final TestMode testMode; private final boolean allowedToReadRecordedValues; private final boolean allowedToRecordValues; private final RecordedData recordedData; private final boolean testProxyEnabled; private final boolean skipRecordingRequestBody; private TestProxyRecordPolicy testProxyRecordPolicy; private TestProxyPlaybackClient testProxyPlaybackClient; private final Queue<String> proxyVariableQueue = new LinkedList<>(); private HttpClient httpClient; private final Path testClassPath; private String xRecordingFileLocation; /** * Creates a new InterceptorManager that either replays test-session records or saves them. * * <ul> * <li>If {@code testMode} is {@link TestMode * record to read network calls from.</li> * <li>If {@code testMode} is {@link TestMode * all the network calls to it.</li> * </ul> * * The test session records are persisted in the path: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test session record. * @param testMode The {@link TestMode} for this interceptor. * @throws UncheckedIOException If {@code testMode} is {@link TestMode * could not be located or the data could not be deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} is {@code null}. * @deprecated Use {@link */ @Deprecated public InterceptorManager(String testName, TestMode testMode) { this(testName, testName, testMode, false, false, false, null); } /** * Creates a new InterceptorManager that either replays test-session records or saves them. 
* * <ul> * <li>If {@code testMode} is {@link TestMode * record to read network calls from.</li> * <li>If {@code testMode} is {@link TestMode * all the network calls to it.</li> * <li>If {@code testMode} is {@link TestMode * record.</li> * </ul> * * The test session records are persisted in the path: "<i>session-records/{@code testName}.json</i>" * * @param testContextManager Contextual information about the test being ran, such as test name, {@link TestMode}, * and others. * @throws UncheckedIOException If {@code testMode} is {@link TestMode * could not be located or the data could not be deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} is {@code null}. */ public InterceptorManager(TestContextManager testContextManager) { this(testContextManager.getTestName(), testContextManager.getTestPlaybackRecordingName(), testContextManager.getTestMode(), testContextManager.doNotRecordTest(), testContextManager.isTestProxyEnabled(), testContextManager.skipRecordingRequestBody(), testContextManager.getTestClassPath()); } private InterceptorManager(String testName, String playbackRecordName, TestMode testMode, boolean doNotRecord, boolean enableTestProxy, boolean skipRecordingRequestBody, Path testClassPath) { this.testProxyEnabled = enableTestProxy; Objects.requireNonNull(testName, "'testName' cannot be null."); this.testName = testName; this.playbackRecordName = CoreUtils.isNullOrEmpty(playbackRecordName) ? 
testName : playbackRecordName; this.testMode = testMode; this.textReplacementRules = new HashMap<>(); this.skipRecordingRequestBody = skipRecordingRequestBody; this.testClassPath = testClassPath; this.allowedToReadRecordedValues = (testMode == TestMode.PLAYBACK && !doNotRecord); this.allowedToRecordValues = (testMode == TestMode.RECORD && !doNotRecord); if (!enableTestProxy && allowedToReadRecordedValues) { this.recordedData = readDataFromFile(); } else if (!enableTestProxy && allowedToRecordValues) { this.recordedData = new RecordedData(); } else { this.recordedData = null; } } /** * Creates a new InterceptorManager that replays test session records. It takes a set of * {@code textReplacementRules}, that can be used by {@link PlaybackClient} to replace values in a * {@link NetworkCallRecord * * The test session records are read from: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test session record. * @param textReplacementRules A set of rules to replace text in {@link NetworkCallRecord * playing back network calls. * @throws UncheckedIOException An existing test session record could not be located or the data could not be * deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}. * @deprecated Use {@link */ @Deprecated public InterceptorManager(String testName, Map<String, String> textReplacementRules) { this(testName, textReplacementRules, false, testName); } /** * Creates a new InterceptorManager that replays test session records. It takes a set of * {@code textReplacementRules}, that can be used by {@link PlaybackClient} to replace values in a * {@link NetworkCallRecord * * The test session records are read from: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test session record. * @param textReplacementRules A set of rules to replace text in {@link NetworkCallRecord * playing back network calls. 
* @param doNotRecord Flag indicating whether network calls should be record or played back. * @throws UncheckedIOException An existing test session record could not be located or the data could not be * deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}. * @deprecated Use {@link */ @Deprecated public InterceptorManager(String testName, Map<String, String> textReplacementRules, boolean doNotRecord) { this(testName, textReplacementRules, doNotRecord, testName); } /** * Creates a new InterceptorManager that replays test session records. It takes a set of * {@code textReplacementRules}, that can be used by {@link PlaybackClient} to replace values in a * {@link NetworkCallRecord * * The test session records are read from: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test. * @param textReplacementRules A set of rules to replace text in {@link NetworkCallRecord * playing back network calls. * @param doNotRecord Flag indicating whether network calls should be record or played back. * @param playbackRecordName Full name of the test including its iteration, used as the playback record name. * @throws UncheckedIOException An existing test session record could not be located or the data could not be * deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}. */ public InterceptorManager(String testName, Map<String, String> textReplacementRules, boolean doNotRecord, String playbackRecordName) { Objects.requireNonNull(testName, "'testName' cannot be null."); Objects.requireNonNull(textReplacementRules, "'textReplacementRules' cannot be null."); this.testName = testName; this.playbackRecordName = CoreUtils.isNullOrEmpty(playbackRecordName) ? 
testName : playbackRecordName; this.testMode = TestMode.PLAYBACK; this.allowedToReadRecordedValues = !doNotRecord; this.allowedToRecordValues = false; this.testProxyEnabled = false; this.skipRecordingRequestBody = false; this.testClassPath = null; this.recordedData = allowedToReadRecordedValues ? readDataFromFile() : null; this.textReplacementRules = textReplacementRules; } /** * Gets whether this InterceptorManager is in playback mode. * * @return true if the InterceptorManager is in playback mode and false otherwise. */ public boolean isPlaybackMode() { return testMode == TestMode.PLAYBACK; } /** * Gets whether this InterceptorManager is in live mode. * * @return true if the InterceptorManager is in live mode and false otherwise. */ public boolean isLiveMode() { return testMode == TestMode.LIVE; } /** * Gets whether this InterceptorManager is in record mode. * * @return true if the InterceptorManager is in record mode and false otherwise. */ public boolean isRecordMode() { return testMode == TestMode.RECORD; } /** * Gets the recorded data InterceptorManager is keeping track of. * * @return The recorded data managed by InterceptorManager. */ public RecordedData getRecordedData() { return recordedData; } /** * A {@link Supplier} for retrieving a variable from a test proxy recording. * * @return The supplier for retrieving a variable. */ public Supplier<String> getProxyVariableSupplier() { return () -> { Objects.requireNonNull(this.testProxyPlaybackClient, "Playback must be started to retrieve values"); if (!CoreUtils.isNullOrEmpty(proxyVariableQueue)) { return proxyVariableQueue.remove(); } else { throw LOGGER.logExceptionAsError(new RuntimeException("'proxyVariableQueue' cannot be null or empty.")); } }; } /** * Get a {@link Consumer} for adding variables used in test proxy tests. * * @return The consumer for adding a variable. 
*/ public Consumer<String> getProxyVariableConsumer() { return proxyVariableQueue::add; } /** * Gets a new HTTP pipeline policy that records network calls and its data is managed by * {@link InterceptorManager}. * * @return HttpPipelinePolicy to record network calls. * @throws IllegalStateException A recording policy was requested when the test proxy is enabled and test mode is not RECORD. */ public HttpPipelinePolicy getRecordPolicy() { if (testProxyEnabled) { return getProxyRecordingPolicy(); } return getRecordPolicy(Collections.emptyList()); } /** * Gets a new HTTP pipeline policy that records network calls. The recorded content is redacted by the given list of * redactor functions to hide sensitive information. * * @param recordingRedactors The custom redactor functions that are applied in addition to the default redactor * functions defined in {@link RecordingRedactor}. * @return {@link HttpPipelinePolicy} to record network calls. * @throws IllegalStateException A recording policy was requested when the test proxy is enabled and test mode is not RECORD. */ public HttpPipelinePolicy getRecordPolicy(List<Function<String, String>> recordingRedactors) { if (testProxyEnabled) { return getProxyRecordingPolicy(); } return new RecordNetworkCallPolicy(recordedData, recordingRedactors); } /** * Gets a new HTTP client that plays back test session records managed by {@link InterceptorManager}. * * @return An HTTP client that plays back network calls from its recorded data. * @throws IllegalStateException A playback client was requested when the test proxy is enabled and test mode is LIVE. 
*/ public HttpClient getPlaybackClient() { if (testProxyEnabled) { if (!isPlaybackMode()) { throw new IllegalStateException("A playback client can only be requested in PLAYBACK mode."); } if (testProxyPlaybackClient == null) { testProxyPlaybackClient = new TestProxyPlaybackClient(httpClient, skipRecordingRequestBody); proxyVariableQueue .addAll(testProxyPlaybackClient.startPlayback(getTestProxyRecordFile(), testClassPath)); xRecordingFileLocation = testProxyPlaybackClient.getRecordingFileLocation(); } return testProxyPlaybackClient; } else { return new PlaybackClient(recordedData, textReplacementRules); } } /** * Disposes of resources used by this InterceptorManager. * * If {@code testMode} is {@link TestMode * "<i>session-records/{@code testName}.json</i>" */ @Override private RecordedData readDataFromFile() { File recordFile = getRecordFile(); try (BufferedReader reader = Files.newBufferedReader(recordFile.toPath()); JsonReader jsonReader = JsonProviders.createReader(reader)) { return RecordedData.fromJson(jsonReader); } catch (IOException ex) { throw LOGGER.logExceptionAsWarning(new UncheckedIOException(ex)); } } private HttpPipelinePolicy getProxyRecordingPolicy() { if (testProxyRecordPolicy == null) { if (!isRecordMode()) { throw new IllegalStateException("A recording policy can only be requested in RECORD mode."); } testProxyRecordPolicy = new TestProxyRecordPolicy(httpClient, skipRecordingRequestBody); testProxyRecordPolicy.startRecording(getTestProxyRecordFile(), testClassPath); } return testProxyRecordPolicy; } /** * Computes the relative path of the record file to the repo root. * * @return A {@link File} with the partial path to where the record file lives. 
*/ private File getTestProxyRecordFile() { Path repoRoot = TestUtils.getRepoRootResolveUntil(testClassPath, "eng"); Path targetFolderRoot = TestUtils.getRepoRootResolveUntil(testClassPath, "target"); Path filePath = Paths.get(targetFolderRoot.toString(), "src/test/resources/session-records", playbackRecordName + ".json"); return repoRoot.relativize(filePath).toFile(); } /* * Attempts to retrieve the playback file, if it is not found an exception is thrown as playback can't continue. */ private File getRecordFile() { File recordFolder = TestUtils.getRecordFolder(); File playbackFile = new File(recordFolder, playbackRecordName + ".json"); File oldPlaybackFile = new File(recordFolder, testName + ".json"); if (!playbackFile.exists() && !oldPlaybackFile.exists()) { throw LOGGER.logExceptionAsError( new RuntimeException(String.format("Missing both new and old playback files. Files are %s and %s.", playbackFile.getPath(), oldPlaybackFile.getPath()))); } if (playbackFile.exists()) { LOGGER.info("==> Playback file path: {}", playbackFile.getPath()); return playbackFile; } else { LOGGER.info("==> Playback file path: {}", oldPlaybackFile.getPath()); return oldPlaybackFile; } } /* * Retrieves or creates the file that will be used to store the recorded test values. 
*/ private File createRecordFile(String testName) throws IOException { File recordFolder = TestUtils.getRecordFolder(); if (!recordFolder.exists()) { if (recordFolder.mkdir()) { LOGGER.verbose("Created directory: {}", recordFolder.getPath()); } } File recordFile = new File(recordFolder, testName + ".json"); if (recordFile.createNewFile()) { LOGGER.verbose("Created record file: {}", recordFile.getPath()); } LOGGER.info("==> Playback file path: " + recordFile); return recordFile; } /** * Add text replacement rule (regex as key, the replacement text as value) into * {@link InterceptorManager * * @param regex the pattern to locate the position of replacement * @param replacement the replacement text */ public void addTextReplacementRule(String regex, String replacement) { textReplacementRules.put(regex, replacement); } /** * Add sanitizer rule for sanitization during record or playback. * * @param testProxySanitizers the list of replacement regex and rules. * @throws RuntimeException Neither playback or record has started. */ public void addSanitizers(List<TestProxySanitizer> testProxySanitizers) { if (CoreUtils.isNullOrEmpty(testProxySanitizers)) { return; } if (testProxyPlaybackClient != null) { testProxyPlaybackClient.addProxySanitization(testProxySanitizers); } else if (testProxyRecordPolicy != null) { testProxyRecordPolicy.addProxySanitization(testProxySanitizers); } else { throw new RuntimeException("Playback or record must have been started before adding sanitizers."); } } /** * Disable common sanitizer rule for sanitization during record or playback. * * @param testProxySanitizersId the list of sanitizer rule Id to disable. * @throws RuntimeException Neither playback or record has started. */ public void removeSanitizers(String... 
testProxySanitizersId) { if (CoreUtils.isNullOrEmpty(testProxySanitizersId)) { return; } if (testProxyPlaybackClient != null) { testProxyPlaybackClient.removeProxySanitization(Arrays.asList(testProxySanitizersId)); } else if (testProxyRecordPolicy != null) { testProxyRecordPolicy.removeProxySanitization(Arrays.asList(testProxySanitizersId)); } else { throw new RuntimeException("Playback or record must have been started before removing sanitizers."); } } /** * Add sanitizer rule for sanitization during record or playback. * * @param testProxySanitizers the list of replacement regex and rules. */ public void addSanitizers(TestProxySanitizer... testProxySanitizers) { if (testProxySanitizers != null) { addSanitizers(Arrays.asList(testProxySanitizers)); } } /** * Add matcher rules to match recorded data in playback. * Matchers are only applied for playback session and so this will be a noop when invoked in RECORD/LIVE mode. * * @param testProxyMatchers the list of matcher rules when playing back recorded data. * @throws RuntimeException Playback has not started. */ public void addMatchers(List<TestProxyRequestMatcher> testProxyMatchers) { if (CoreUtils.isNullOrEmpty(testProxyMatchers)) { return; } if (testMode != TestMode.PLAYBACK) { return; } if (testProxyPlaybackClient != null) { testProxyPlaybackClient.addMatcherRequests(testProxyMatchers); } else { throw new RuntimeException("Playback must have been started before adding matchers."); } } /** * Add matcher rules to match recorded data in playback. * Matchers are only applied for playback session and so this will be a noop when invoked in RECORD/LIVE mode. * * @param testProxyRequestMatchers the list of matcher rules when playing back recorded data. */ public void addMatchers(TestProxyRequestMatcher... testProxyRequestMatchers) { if (testProxyRequestMatchers != null) { addMatchers(Arrays.asList(testProxyRequestMatchers)); } } /** * Get the recording file location in assets repo. 
* * @return the assets repo location of the recording file. */ public String getRecordingFileLocation() { return xRecordingFileLocation; } /** * Sets the httpClient to be used for this test. * * @param httpClient The {@link HttpClient} implementation to use. */ void setHttpClient(HttpClient httpClient) { this.httpClient = httpClient; } /** * Sets the recording options for the proxy. * * @param testProxyRecordingOptions The {@link TestProxyRecordingOptions} to use. * @throws RuntimeException if test mode is not record. */ public void setProxyRecordingOptions(TestProxyRecordingOptions testProxyRecordingOptions) { if (testMode != TestMode.RECORD) { return; } if (testProxyRecordPolicy != null) { testProxyRecordPolicy.setRecordingOptions(testProxyRecordingOptions); } else { throw new RuntimeException("Recording must have been started before setting recording options."); } } /** * Gets the name of the running test. * * @return Name of the running test. */ public String getTestName() { return testName; } /** * Gets the name of the playback record. * <p> * The playback record name is equivalent to {@code <testClass>.<testMethod>[<testIteration>]}. * * @return Name of the playback record. */ public String getPlaybackRecordName() { return playbackRecordName; } }
Does the `JdbcConnectionDetailsBeanPostProcessor` has an order value defined? maybe we should reference that?
public int getOrder() { return Ordered.HIGHEST_PRECEDENCE + 3; }
return Ordered.HIGHEST_PRECEDENCE + 3;
public int getOrder() { return Ordered.HIGHEST_PRECEDENCE + 3; }
class JdbcPropertiesBeanPostProcessor implements BeanPostProcessor, EnvironmentAware, ApplicationContextAware, PriorityOrdered { private static final Logger LOGGER = LoggerFactory.getLogger(JdbcPropertiesBeanPostProcessor.class); private static final String SPRING_TOKEN_CREDENTIAL_PROVIDER_CLASS_NAME = SpringTokenCredentialProvider.class.getName(); private static final String SPRING_CLOUD_AZURE_DATASOURCE_PREFIX = "spring.datasource.azure"; private GenericApplicationContext applicationContext; private Environment environment; @Override @Override public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException { if (bean instanceof DataSourceProperties) { DataSourceProperties dataSourceProperties = (DataSourceProperties) bean; AzureJdbcPasswordlessProperties properties = buildAzureProperties(); if (!properties.isPasswordlessEnabled()) { LOGGER.debug("Feature passwordless authentication is not enabled, skip enhancing jdbc url."); return bean; } String url = dataSourceProperties.getUrl(); if (!StringUtils.hasText(url)) { LOGGER.debug("No 'spring.datasource.url' provided, skip enhancing jdbc url."); return bean; } JdbcConnectionString connectionString = JdbcConnectionString.resolve(url); if (connectionString == null) { LOGGER.debug("Can not resolve jdbc connection string from provided {}, skip enhancing jdbc url.", url); return bean; } boolean isPasswordProvided = StringUtils.hasText(dataSourceProperties.getPassword()); if (isPasswordProvided) { LOGGER.debug( "If you are using Azure hosted services," + "it is encouraged to use the passwordless feature. 
" + "Please refer to https: return bean; } DatabaseType databaseType = connectionString.getDatabaseType(); if (!databaseType.isDatabasePluginAvailable()) { LOGGER.debug("The jdbc plugin with provided jdbc schema is not on the classpath, skip enhancing jdbc url."); return bean; } try { JdbcConnectionStringEnhancer enhancer = new JdbcConnectionStringEnhancer(connectionString); enhancer.enhanceProperties(buildEnhancedProperties(databaseType, properties)); enhanceUserAgent(databaseType, enhancer); ((DataSourceProperties) bean).setUrl(enhancer.getJdbcUrl()); } catch (IllegalArgumentException e) { LOGGER.debug("Inconsistent properties detected, skip enhancing jdbc url."); } } return bean; } private void enhanceUserAgent(DatabaseType databaseType, JdbcConnectionStringEnhancer enhancer) { if (DatabaseType.MYSQL == databaseType) { Map<String, String> enhancedAttributes = new HashMap<>(); enhancedAttributes.put(MYSQL_PROPERTY_CONNECTION_ATTRIBUTES_ATTRIBUTE_EXTENSION_VERSION, AzureSpringIdentifier.AZURE_SPRING_MYSQL_OAUTH); enhancer.enhancePropertyAttributes( MYSQL_PROPERTY_NAME_CONNECTION_ATTRIBUTES, enhancedAttributes, MYSQL_PROPERTY_CONNECTION_ATTRIBUTES_DELIMITER, MYSQL_PROPERTY_CONNECTION_ATTRIBUTES_KV_DELIMITER ); } else if (DatabaseType.POSTGRESQL == databaseType) { Map<String, String> enhancedProperties = new HashMap<>(); enhancedProperties.put(POSTGRESQL_PROPERTY_NAME_APPLICATION_NAME, AzureSpringIdentifier.AZURE_SPRING_POSTGRESQL_OAUTH); enhancedProperties.put(POSTGRESQL_PROPERTY_NAME_ASSUME_MIN_SERVER_VERSION, POSTGRESQL_PROPERTY_VALUE_ASSUME_MIN_SERVER_VERSION); enhancer.enhanceProperties(enhancedProperties, true); } } private Map<String, String> buildEnhancedProperties(DatabaseType databaseType, AzureJdbcPasswordlessProperties properties) { Map<String, String> result = new HashMap<>(); TokenCredentialProvider tokenCredentialProvider = TokenCredentialProvider.createDefault(new TokenCredentialProviderOptions(properties.toPasswordlessProperties())); TokenCredential 
tokenCredential = tokenCredentialProvider.get(); AuthProperty.TOKEN_CREDENTIAL_BEAN_NAME.setProperty(result, PASSWORDLESS_TOKEN_CREDENTIAL_BEAN_NAME); applicationContext.registerBean(PASSWORDLESS_TOKEN_CREDENTIAL_BEAN_NAME, TokenCredential.class, () -> tokenCredential); LOGGER.debug("Add SpringTokenCredentialProvider as the default token credential provider."); AuthProperty.TOKEN_CREDENTIAL_PROVIDER_CLASS_NAME.setProperty(result, SPRING_TOKEN_CREDENTIAL_PROVIDER_CLASS_NAME); AuthProperty.AUTHORITY_HOST.setProperty(result, properties.getProfile().getEnvironment().getActiveDirectoryEndpoint()); databaseType.setDefaultEnhancedProperties(result); return result; } @Override public void setEnvironment(Environment environment) { this.environment = environment; } @Override public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { this.applicationContext = (GenericApplicationContext) applicationContext; } private AzureJdbcPasswordlessProperties buildAzureProperties() { AzureGlobalProperties azureGlobalProperties = applicationContext.getBean(AzureGlobalProperties.class); AzureJdbcPasswordlessProperties azurePasswordlessProperties = Binder.get(environment) .bindOrCreate(SPRING_CLOUD_AZURE_DATASOURCE_PREFIX, AzureJdbcPasswordlessProperties.class); AzureJdbcPasswordlessProperties mergedProperties = new AzureJdbcPasswordlessProperties(); AzurePasswordlessPropertiesUtils.mergeAzureCommonProperties(azureGlobalProperties, azurePasswordlessProperties, mergedProperties); return mergedProperties; } }
class JdbcPropertiesBeanPostProcessor implements BeanPostProcessor, EnvironmentAware, ApplicationContextAware, PriorityOrdered { private static final Logger LOGGER = LoggerFactory.getLogger(JdbcPropertiesBeanPostProcessor.class); private static final String SPRING_TOKEN_CREDENTIAL_PROVIDER_CLASS_NAME = SpringTokenCredentialProvider.class.getName(); private static final String SPRING_CLOUD_AZURE_DATASOURCE_PREFIX = "spring.datasource.azure"; private GenericApplicationContext applicationContext; private Environment environment; @Override @Override public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException { if (bean instanceof DataSourceProperties) { DataSourceProperties dataSourceProperties = (DataSourceProperties) bean; AzureJdbcPasswordlessProperties properties = buildAzureProperties(); if (!properties.isPasswordlessEnabled()) { LOGGER.debug("Feature passwordless authentication is not enabled, skip enhancing jdbc url."); return bean; } String url = dataSourceProperties.getUrl(); if (!StringUtils.hasText(url)) { LOGGER.debug("No 'spring.datasource.url' provided, skip enhancing jdbc url."); return bean; } JdbcConnectionString connectionString = JdbcConnectionString.resolve(url); if (connectionString == null) { LOGGER.debug("Can not resolve jdbc connection string from provided {}, skip enhancing jdbc url.", url); return bean; } boolean isPasswordProvided = StringUtils.hasText(dataSourceProperties.getPassword()); if (isPasswordProvided) { LOGGER.debug( "If you are using Azure hosted services," + "it is encouraged to use the passwordless feature. 
" + "Please refer to https: return bean; } DatabaseType databaseType = connectionString.getDatabaseType(); if (!databaseType.isDatabasePluginAvailable()) { LOGGER.debug("The jdbc plugin with provided jdbc schema is not on the classpath, skip enhancing jdbc url."); return bean; } try { JdbcConnectionStringEnhancer enhancer = new JdbcConnectionStringEnhancer(connectionString); enhancer.enhanceProperties(buildEnhancedProperties(databaseType, properties)); enhanceUserAgent(databaseType, enhancer); ((DataSourceProperties) bean).setUrl(enhancer.getJdbcUrl()); } catch (IllegalArgumentException e) { LOGGER.debug("Inconsistent properties detected, skip enhancing jdbc url."); } } return bean; } private void enhanceUserAgent(DatabaseType databaseType, JdbcConnectionStringEnhancer enhancer) { if (DatabaseType.MYSQL == databaseType) { Map<String, String> enhancedAttributes = new HashMap<>(); enhancedAttributes.put(MYSQL_PROPERTY_CONNECTION_ATTRIBUTES_ATTRIBUTE_EXTENSION_VERSION, AzureSpringIdentifier.AZURE_SPRING_MYSQL_OAUTH); enhancer.enhancePropertyAttributes( MYSQL_PROPERTY_NAME_CONNECTION_ATTRIBUTES, enhancedAttributes, MYSQL_PROPERTY_CONNECTION_ATTRIBUTES_DELIMITER, MYSQL_PROPERTY_CONNECTION_ATTRIBUTES_KV_DELIMITER ); } else if (DatabaseType.POSTGRESQL == databaseType) { Map<String, String> enhancedProperties = new HashMap<>(); enhancedProperties.put(POSTGRESQL_PROPERTY_NAME_APPLICATION_NAME, AzureSpringIdentifier.AZURE_SPRING_POSTGRESQL_OAUTH); enhancedProperties.put(POSTGRESQL_PROPERTY_NAME_ASSUME_MIN_SERVER_VERSION, POSTGRESQL_PROPERTY_VALUE_ASSUME_MIN_SERVER_VERSION); enhancer.enhanceProperties(enhancedProperties, true); } } private Map<String, String> buildEnhancedProperties(DatabaseType databaseType, AzureJdbcPasswordlessProperties properties) { Map<String, String> result = new HashMap<>(); TokenCredentialProvider tokenCredentialProvider = TokenCredentialProvider.createDefault(new TokenCredentialProviderOptions(properties.toPasswordlessProperties())); TokenCredential 
tokenCredential = tokenCredentialProvider.get(); AuthProperty.TOKEN_CREDENTIAL_BEAN_NAME.setProperty(result, PASSWORDLESS_TOKEN_CREDENTIAL_BEAN_NAME); applicationContext.registerBean(PASSWORDLESS_TOKEN_CREDENTIAL_BEAN_NAME, TokenCredential.class, () -> tokenCredential); LOGGER.debug("Add SpringTokenCredentialProvider as the default token credential provider."); AuthProperty.TOKEN_CREDENTIAL_PROVIDER_CLASS_NAME.setProperty(result, SPRING_TOKEN_CREDENTIAL_PROVIDER_CLASS_NAME); AuthProperty.AUTHORITY_HOST.setProperty(result, properties.getProfile().getEnvironment().getActiveDirectoryEndpoint()); databaseType.setDefaultEnhancedProperties(result); return result; } @Override public void setEnvironment(Environment environment) { this.environment = environment; } @Override public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { this.applicationContext = (GenericApplicationContext) applicationContext; } private AzureJdbcPasswordlessProperties buildAzureProperties() { AzureGlobalProperties azureGlobalProperties = applicationContext.getBean(AzureGlobalProperties.class); AzureJdbcPasswordlessProperties azurePasswordlessProperties = Binder.get(environment) .bindOrCreate(SPRING_CLOUD_AZURE_DATASOURCE_PREFIX, AzureJdbcPasswordlessProperties.class); AzureJdbcPasswordlessProperties mergedProperties = new AzureJdbcPasswordlessProperties(); AzurePasswordlessPropertiesUtils.mergeAzureCommonProperties(azureGlobalProperties, azurePasswordlessProperties, mergedProperties); return mergedProperties; } }
Yes.
public int getOrder() { return Ordered.HIGHEST_PRECEDENCE + 3; }
return Ordered.HIGHEST_PRECEDENCE + 3;
public int getOrder() { return Ordered.HIGHEST_PRECEDENCE + 3; }
class JdbcPropertiesBeanPostProcessor implements BeanPostProcessor, EnvironmentAware, ApplicationContextAware, PriorityOrdered { private static final Logger LOGGER = LoggerFactory.getLogger(JdbcPropertiesBeanPostProcessor.class); private static final String SPRING_TOKEN_CREDENTIAL_PROVIDER_CLASS_NAME = SpringTokenCredentialProvider.class.getName(); private static final String SPRING_CLOUD_AZURE_DATASOURCE_PREFIX = "spring.datasource.azure"; private GenericApplicationContext applicationContext; private Environment environment; @Override @Override public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException { if (bean instanceof DataSourceProperties) { DataSourceProperties dataSourceProperties = (DataSourceProperties) bean; AzureJdbcPasswordlessProperties properties = buildAzureProperties(); if (!properties.isPasswordlessEnabled()) { LOGGER.debug("Feature passwordless authentication is not enabled, skip enhancing jdbc url."); return bean; } String url = dataSourceProperties.getUrl(); if (!StringUtils.hasText(url)) { LOGGER.debug("No 'spring.datasource.url' provided, skip enhancing jdbc url."); return bean; } JdbcConnectionString connectionString = JdbcConnectionString.resolve(url); if (connectionString == null) { LOGGER.debug("Can not resolve jdbc connection string from provided {}, skip enhancing jdbc url.", url); return bean; } boolean isPasswordProvided = StringUtils.hasText(dataSourceProperties.getPassword()); if (isPasswordProvided) { LOGGER.debug( "If you are using Azure hosted services," + "it is encouraged to use the passwordless feature. 
" + "Please refer to https: return bean; } DatabaseType databaseType = connectionString.getDatabaseType(); if (!databaseType.isDatabasePluginAvailable()) { LOGGER.debug("The jdbc plugin with provided jdbc schema is not on the classpath, skip enhancing jdbc url."); return bean; } try { JdbcConnectionStringEnhancer enhancer = new JdbcConnectionStringEnhancer(connectionString); enhancer.enhanceProperties(buildEnhancedProperties(databaseType, properties)); enhanceUserAgent(databaseType, enhancer); ((DataSourceProperties) bean).setUrl(enhancer.getJdbcUrl()); } catch (IllegalArgumentException e) { LOGGER.debug("Inconsistent properties detected, skip enhancing jdbc url."); } } return bean; } private void enhanceUserAgent(DatabaseType databaseType, JdbcConnectionStringEnhancer enhancer) { if (DatabaseType.MYSQL == databaseType) { Map<String, String> enhancedAttributes = new HashMap<>(); enhancedAttributes.put(MYSQL_PROPERTY_CONNECTION_ATTRIBUTES_ATTRIBUTE_EXTENSION_VERSION, AzureSpringIdentifier.AZURE_SPRING_MYSQL_OAUTH); enhancer.enhancePropertyAttributes( MYSQL_PROPERTY_NAME_CONNECTION_ATTRIBUTES, enhancedAttributes, MYSQL_PROPERTY_CONNECTION_ATTRIBUTES_DELIMITER, MYSQL_PROPERTY_CONNECTION_ATTRIBUTES_KV_DELIMITER ); } else if (DatabaseType.POSTGRESQL == databaseType) { Map<String, String> enhancedProperties = new HashMap<>(); enhancedProperties.put(POSTGRESQL_PROPERTY_NAME_APPLICATION_NAME, AzureSpringIdentifier.AZURE_SPRING_POSTGRESQL_OAUTH); enhancedProperties.put(POSTGRESQL_PROPERTY_NAME_ASSUME_MIN_SERVER_VERSION, POSTGRESQL_PROPERTY_VALUE_ASSUME_MIN_SERVER_VERSION); enhancer.enhanceProperties(enhancedProperties, true); } } private Map<String, String> buildEnhancedProperties(DatabaseType databaseType, AzureJdbcPasswordlessProperties properties) { Map<String, String> result = new HashMap<>(); TokenCredentialProvider tokenCredentialProvider = TokenCredentialProvider.createDefault(new TokenCredentialProviderOptions(properties.toPasswordlessProperties())); TokenCredential 
tokenCredential = tokenCredentialProvider.get(); AuthProperty.TOKEN_CREDENTIAL_BEAN_NAME.setProperty(result, PASSWORDLESS_TOKEN_CREDENTIAL_BEAN_NAME); applicationContext.registerBean(PASSWORDLESS_TOKEN_CREDENTIAL_BEAN_NAME, TokenCredential.class, () -> tokenCredential); LOGGER.debug("Add SpringTokenCredentialProvider as the default token credential provider."); AuthProperty.TOKEN_CREDENTIAL_PROVIDER_CLASS_NAME.setProperty(result, SPRING_TOKEN_CREDENTIAL_PROVIDER_CLASS_NAME); AuthProperty.AUTHORITY_HOST.setProperty(result, properties.getProfile().getEnvironment().getActiveDirectoryEndpoint()); databaseType.setDefaultEnhancedProperties(result); return result; } @Override public void setEnvironment(Environment environment) { this.environment = environment; } @Override public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { this.applicationContext = (GenericApplicationContext) applicationContext; } private AzureJdbcPasswordlessProperties buildAzureProperties() { AzureGlobalProperties azureGlobalProperties = applicationContext.getBean(AzureGlobalProperties.class); AzureJdbcPasswordlessProperties azurePasswordlessProperties = Binder.get(environment) .bindOrCreate(SPRING_CLOUD_AZURE_DATASOURCE_PREFIX, AzureJdbcPasswordlessProperties.class); AzureJdbcPasswordlessProperties mergedProperties = new AzureJdbcPasswordlessProperties(); AzurePasswordlessPropertiesUtils.mergeAzureCommonProperties(azureGlobalProperties, azurePasswordlessProperties, mergedProperties); return mergedProperties; } }
class JdbcPropertiesBeanPostProcessor implements BeanPostProcessor, EnvironmentAware, ApplicationContextAware, PriorityOrdered { private static final Logger LOGGER = LoggerFactory.getLogger(JdbcPropertiesBeanPostProcessor.class); private static final String SPRING_TOKEN_CREDENTIAL_PROVIDER_CLASS_NAME = SpringTokenCredentialProvider.class.getName(); private static final String SPRING_CLOUD_AZURE_DATASOURCE_PREFIX = "spring.datasource.azure"; private GenericApplicationContext applicationContext; private Environment environment; @Override @Override public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException { if (bean instanceof DataSourceProperties) { DataSourceProperties dataSourceProperties = (DataSourceProperties) bean; AzureJdbcPasswordlessProperties properties = buildAzureProperties(); if (!properties.isPasswordlessEnabled()) { LOGGER.debug("Feature passwordless authentication is not enabled, skip enhancing jdbc url."); return bean; } String url = dataSourceProperties.getUrl(); if (!StringUtils.hasText(url)) { LOGGER.debug("No 'spring.datasource.url' provided, skip enhancing jdbc url."); return bean; } JdbcConnectionString connectionString = JdbcConnectionString.resolve(url); if (connectionString == null) { LOGGER.debug("Can not resolve jdbc connection string from provided {}, skip enhancing jdbc url.", url); return bean; } boolean isPasswordProvided = StringUtils.hasText(dataSourceProperties.getPassword()); if (isPasswordProvided) { LOGGER.debug( "If you are using Azure hosted services," + "it is encouraged to use the passwordless feature. 
" + "Please refer to https: return bean; } DatabaseType databaseType = connectionString.getDatabaseType(); if (!databaseType.isDatabasePluginAvailable()) { LOGGER.debug("The jdbc plugin with provided jdbc schema is not on the classpath, skip enhancing jdbc url."); return bean; } try { JdbcConnectionStringEnhancer enhancer = new JdbcConnectionStringEnhancer(connectionString); enhancer.enhanceProperties(buildEnhancedProperties(databaseType, properties)); enhanceUserAgent(databaseType, enhancer); ((DataSourceProperties) bean).setUrl(enhancer.getJdbcUrl()); } catch (IllegalArgumentException e) { LOGGER.debug("Inconsistent properties detected, skip enhancing jdbc url."); } } return bean; } private void enhanceUserAgent(DatabaseType databaseType, JdbcConnectionStringEnhancer enhancer) { if (DatabaseType.MYSQL == databaseType) { Map<String, String> enhancedAttributes = new HashMap<>(); enhancedAttributes.put(MYSQL_PROPERTY_CONNECTION_ATTRIBUTES_ATTRIBUTE_EXTENSION_VERSION, AzureSpringIdentifier.AZURE_SPRING_MYSQL_OAUTH); enhancer.enhancePropertyAttributes( MYSQL_PROPERTY_NAME_CONNECTION_ATTRIBUTES, enhancedAttributes, MYSQL_PROPERTY_CONNECTION_ATTRIBUTES_DELIMITER, MYSQL_PROPERTY_CONNECTION_ATTRIBUTES_KV_DELIMITER ); } else if (DatabaseType.POSTGRESQL == databaseType) { Map<String, String> enhancedProperties = new HashMap<>(); enhancedProperties.put(POSTGRESQL_PROPERTY_NAME_APPLICATION_NAME, AzureSpringIdentifier.AZURE_SPRING_POSTGRESQL_OAUTH); enhancedProperties.put(POSTGRESQL_PROPERTY_NAME_ASSUME_MIN_SERVER_VERSION, POSTGRESQL_PROPERTY_VALUE_ASSUME_MIN_SERVER_VERSION); enhancer.enhanceProperties(enhancedProperties, true); } } private Map<String, String> buildEnhancedProperties(DatabaseType databaseType, AzureJdbcPasswordlessProperties properties) { Map<String, String> result = new HashMap<>(); TokenCredentialProvider tokenCredentialProvider = TokenCredentialProvider.createDefault(new TokenCredentialProviderOptions(properties.toPasswordlessProperties())); TokenCredential 
tokenCredential = tokenCredentialProvider.get(); AuthProperty.TOKEN_CREDENTIAL_BEAN_NAME.setProperty(result, PASSWORDLESS_TOKEN_CREDENTIAL_BEAN_NAME); applicationContext.registerBean(PASSWORDLESS_TOKEN_CREDENTIAL_BEAN_NAME, TokenCredential.class, () -> tokenCredential); LOGGER.debug("Add SpringTokenCredentialProvider as the default token credential provider."); AuthProperty.TOKEN_CREDENTIAL_PROVIDER_CLASS_NAME.setProperty(result, SPRING_TOKEN_CREDENTIAL_PROVIDER_CLASS_NAME); AuthProperty.AUTHORITY_HOST.setProperty(result, properties.getProfile().getEnvironment().getActiveDirectoryEndpoint()); databaseType.setDefaultEnhancedProperties(result); return result; } @Override public void setEnvironment(Environment environment) { this.environment = environment; } @Override public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { this.applicationContext = (GenericApplicationContext) applicationContext; } private AzureJdbcPasswordlessProperties buildAzureProperties() { AzureGlobalProperties azureGlobalProperties = applicationContext.getBean(AzureGlobalProperties.class); AzureJdbcPasswordlessProperties azurePasswordlessProperties = Binder.get(environment) .bindOrCreate(SPRING_CLOUD_AZURE_DATASOURCE_PREFIX, AzureJdbcPasswordlessProperties.class); AzureJdbcPasswordlessProperties mergedProperties = new AzureJdbcPasswordlessProperties(); AzurePasswordlessPropertiesUtils.mergeAzureCommonProperties(azureGlobalProperties, azurePasswordlessProperties, mergedProperties); return mergedProperties; } }
Do we need a troubleshooting guide (TSG) update to cover all of the new error messages introduced here, so each message maps to a documented remediation step?
AccessToken authenticateWithExchangeTokenHelper(TokenRequestContext request, String assertionToken) throws IOException { String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId + "/oauth2/v2.0/token"; String urlParams = "client_assertion=" + assertionToken + "&client_assertion_type=urn:ietf:params:oauth:client-assertion-type:jwt-bearer&client_id=" + clientId + "&grant_type=client_credentials&scope=" + urlEncode(request.getScopes().get(0)); byte[] postData = urlParams.getBytes(StandardCharsets.UTF_8); int postDataLength = postData.length; HttpURLConnection connection = null; URL url = getUrl(authorityUrl); try { connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("POST"); connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded"); connection.setRequestProperty("Content-Length", Integer.toString(postDataLength)); connection.setRequestProperty("User-Agent", userAgent); connection.setDoOutput(true); try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) { outputStream.write(postData); } connection.connect(); return MSIToken.fromJson(JsonProviders.createReader(connection.getInputStream())); } catch (IOException exception) { if (connection == null) { throw LOGGER.logExceptionAsError(new RuntimeException( "Could not connect to the authority host: " + url + ".", exception)); } int responseCode; try { responseCode = connection.getResponseCode(); } catch (Exception e) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "Workload Identity authentication unavailable. " + "Connection to the authority host cannot be established, " + e.getMessage() + ".", e)); } if (responseCode == 400) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "ManagedIdentityCredential authentication unavailable. 
" + "Connection to IMDS endpoint cannot be established, " + exception.getMessage() + ".", exception)); } throw LOGGER.logExceptionAsError(new RuntimeException( "Couldn't acquire access token from Workload Identity.", exception)); } finally { if (connection != null) { connection.disconnect(); } } }
} catch (IOException exception) {
/**
 * Exchanges a workload identity client assertion (federated token) for an access token by
 * POSTing a {@code client_credentials} request to the tenant's {@code /oauth2/v2.0/token}
 * endpoint over a raw {@link HttpURLConnection}.
 *
 * @param request the token request; only the first scope is sent — TODO confirm multi-scope
 *     requests are not expected on this path.
 * @param assertionToken the client assertion to exchange.
 * @return the parsed access token.
 * @throws IOException declared for signature compatibility; I/O failures are wrapped below.
 */
AccessToken authenticateWithExchangeTokenHelper(TokenRequestContext request, String assertionToken) throws IOException {
    // Normalize the authority host (strip trailing slashes) before appending the token path.
    String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId + "/oauth2/v2.0/token";
    // Only the first requested scope is included in the form body.
    String urlParams = "client_assertion=" + assertionToken + "&client_assertion_type=urn:ietf:params:oauth:client-assertion-type:jwt-bearer&client_id=" + clientId + "&grant_type=client_credentials&scope=" + urlEncode(request.getScopes().get(0));
    byte[] postData = urlParams.getBytes(StandardCharsets.UTF_8);
    int postDataLength = postData.length;
    HttpURLConnection connection = null;
    URL url = getUrl(authorityUrl);
    try {
        connection = (HttpURLConnection) url.openConnection();
        connection.setRequestMethod("POST");
        connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded");
        connection.setRequestProperty("Content-Length", Integer.toString(postDataLength));
        connection.setRequestProperty("User-Agent", userAgent);
        connection.setDoOutput(true);
        try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) {
            outputStream.write(postData);
        }
        connection.connect();
        return MSIToken.fromJson(JsonProviders.createReader(connection.getInputStream()));
    } catch (IOException exception) {
        // connection == null means url.openConnection() itself failed — nothing more to inspect.
        if (connection == null) {
            throw LOGGER.logExceptionAsError(new RuntimeException(
                "Could not connect to the authority host: " + url + ".", exception));
        }
        int responseCode;
        try {
            responseCode = connection.getResponseCode();
        } catch (Exception e) {
            // Could not even read a status code: report the authority host as unreachable.
            throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException(
                "WorkloadIdentityCredential authentication unavailable. "
                    + "Connection to the authority host cannot be established, " + e.getMessage() + ".", e));
        }
        if (responseCode == 400) {
            // 400 = the authority rejected the exchange request (e.g. bad assertion, client, or scope).
            throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException(
                "WorkloadIdentityCredential authentication unavailable. "
                    + "The request to the authority host was invalid. "
                    + "Additional details: " + exception.getMessage() + ".", exception));
        }
        // Any other failure mode is unexpected: surface it as a hard error.
        throw LOGGER.logExceptionAsError(new RuntimeException(
            "Couldn't acquire access token from Workload Identity.", exception));
    } finally {
        // Always release the underlying connection.
        if (connection != null) {
            connection.disconnect();
        }
    }
}
class IdentityClientBase { static final String WINDOWS_STARTER = "cmd.exe"; static final String LINUX_MAC_STARTER = "/bin/sh"; static final String WINDOWS_SWITCHER = "/c"; static final String LINUX_MAC_SWITCHER = "-c"; static final Pattern WINDOWS_PROCESS_ERROR_MESSAGE = Pattern.compile("'azd?' is not recognized"); static final Pattern SH_PROCESS_ERROR_MESSAGE = Pattern.compile("azd?:.*not found"); static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); static final String IDENTITY_ENDPOINT_VERSION = "2019-08-01"; static final String MSI_ENDPOINT_VERSION = "2017-09-01"; static final String ARC_MANAGED_IDENTITY_ENDPOINT_API_VERSION = "2019-11-01"; static final String ADFS_TENANT = "adfs"; static final String HTTP_LOCALHOST = "http: static final String SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION = "2019-07-01-preview"; static final ClientLogger LOGGER = new ClientLogger(IdentityClient.class); static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("\"accessToken\": \"(.*?)(\"|$)"); static final Pattern TRAILING_FORWARD_SLASHES = Pattern.compile("/+$"); private static final String AZURE_IDENTITY_PROPERTIES = "azure-identity.properties"; private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final ClientOptions DEFAULT_CLIENT_OPTIONS = new ClientOptions(); private static final Map<String, HttpMethod> HTTP_METHOD_HASH_MAP = new HashMap<>(8); private final Map<String, String> properties = CoreUtils.getProperties(AZURE_IDENTITY_PROPERTIES); final IdentityClientOptions options; final String tenantId; final String clientId; final String resourceId; final String clientSecret; final String clientAssertionFilePath; final byte[] certificate; final String certificatePath; final Supplier<String> clientAssertionSupplier; final Function<HttpPipeline, String> clientAssertionSupplierWithHttpPipeline; final String certificatePassword; HttpPipelineAdapter 
httpPipelineAdapter; String userAgent = UserAgentUtil.DEFAULT_USER_AGENT_HEADER; private Class<?> interactiveBrowserBroker; private Method getMsalRuntimeBroker; HttpPipeline httpPipeline; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param resourceId the resource ID of the application * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificate the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. * @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param clientAssertionTimeout the timeout to use for the client assertion. * @param options the options configuring the client. */ IdentityClientBase(String tenantId, String clientId, String clientSecret, String certificatePath, String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier, Function<HttpPipeline, String> clientAssertionSupplierWithHttpPipeline, byte[] certificate, String certificatePassword, boolean isSharedTokenCacheCredential, Duration clientAssertionTimeout, IdentityClientOptions options) { if (tenantId == null) { tenantId = IdentityUtil.DEFAULT_TENANT; options.setAdditionallyAllowedTenants(Collections.singletonList(IdentityUtil.ALL_TENANTS)); } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.resourceId = resourceId; this.clientSecret = clientSecret; this.clientAssertionFilePath = clientAssertionFilePath; this.certificatePath = certificatePath; this.certificate = certificate; this.certificatePassword = certificatePassword; this.clientAssertionSupplier = clientAssertionSupplier; this.clientAssertionSupplierWithHttpPipeline = 
clientAssertionSupplierWithHttpPipeline; this.options = options; } ConfidentialClientApplication getConfidentialClient(boolean enableCae) { if (clientId == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificate != null || certificatePath != null) { try { byte[] certificateBytes = getCertificateBytes(); if (CertificateUtil.isPem(certificateBytes)) { List<X509Certificate> x509CertificateList = CertificateUtil.publicKeyFromPem(certificateBytes); PrivateKey privateKey = CertificateUtil.privateKeyFromPem(certificateBytes); if (x509CertificateList.size() == 1) { credential = ClientCredentialFactory.createFromCertificate( privateKey, x509CertificateList.get(0)); } else { credential = ClientCredentialFactory.createFromCertificateChain( privateKey, x509CertificateList); } } else { try (InputStream pfxCertificateStream = getCertificateInputStream()) { credential = ClientCredentialFactory.createFromCertificate(pfxCertificateStream, certificatePassword); } } } catch (IOException | GeneralSecurityException e) { throw LOGGER.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e)); } } else if (clientAssertionSupplier != null) { credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get()); } else if (clientAssertionSupplierWithHttpPipeline != null) { credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplierWithHttpPipeline.apply(getPipeline())); } else { throw LOGGER.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path." 
+ " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder .logPii(options.isUnsafeSupportLoggingEnabled()) .authority(authorityUrl) .instanceDiscovery(options.isInstanceDiscoveryEnabled()); if (!options.isInstanceDiscoveryEnabled()) { LOGGER.log(LogLevel.VERBOSE, () -> "Instance discovery and authority validation is disabled. In this" + " state, the library will not fetch metadata to validate the specified authority host. As a" + " result, it is crucial to ensure that the configured authority host is valid and trustworthy."); } } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } if (enableCae) { Set<String> set = new HashSet<>(1); set.add("CP1"); applicationBuilder.clientCapabilities(set); } applicationBuilder.sendX5c(options.isIncludeX5c()); initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl(enableCae) .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); applicationBuilder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t)); } } if (options.getRegionalAuthority() != null) { if 
(options.getRegionalAuthority() == RegionalAuthority.AUTO_DISCOVER_REGION) { applicationBuilder.autoDetectRegion(true); } else { applicationBuilder.azureRegion(options.getRegionalAuthority().toString()); } } ConfidentialClientApplication confidentialClientApplication = applicationBuilder.build(); if (tokenCache != null) { tokenCache.registerCache(); } return confidentialClientApplication; } PublicClientApplication getPublicClient(boolean sharedTokenCacheCredential, boolean enableCae) { if (clientId == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; PublicClientApplication.Builder builder = PublicClientApplication.builder(clientId); try { builder = builder .logPii(options.isUnsafeSupportLoggingEnabled()) .authority(authorityUrl).instanceDiscovery(options.isInstanceDiscoveryEnabled()); if (!options.isInstanceDiscoveryEnabled()) { LOGGER.log(LogLevel.VERBOSE, () -> "Instance discovery and authority validation is disabled. In this" + " state, the library will not fetch metadata to validate the specified authority host. 
As a" + " result, it is crucial to ensure that the configured authority host is valid and trustworthy."); } } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { builder.httpClient(httpPipelineAdapter); } else { builder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { builder.executorService(options.getExecutorService()); } if (enableCae) { Set<String> set = new HashSet<>(1); set.add("CP1"); builder.clientCapabilities(set); } if (options.isBrokerEnabled()) { if (interactiveBrowserBroker == null) { try { interactiveBrowserBroker = Class.forName("com.azure.identity.broker.implementation.InteractiveBrowserBroker"); } catch (ClassNotFoundException e) { throw LOGGER.logExceptionAsError(new IllegalStateException("Could not load the brokered authentication library. " + "Ensure that the azure-identity-broker library is on the classpath.", e)); } getMsalRuntimeBroker = null; try { getMsalRuntimeBroker = interactiveBrowserBroker.getMethod("getMsalRuntimeBroker"); } catch (NoSuchMethodException e) { throw LOGGER.logExceptionAsError(new IllegalStateException("Could not obtain the InteractiveBrowserBroker. " + "Ensure that the azure-identity-broker library is on the classpath.", e)); } } try { if (getMsalRuntimeBroker != null) { builder.broker((IBroker) getMsalRuntimeBroker.invoke(null)); } else { throw LOGGER.logExceptionAsError(new IllegalStateException("Could not obtain the MSAL Broker. " + "Ensure that the azure-identity-broker library is on the classpath.", null)); } } catch (InvocationTargetException | IllegalAccessException e) { throw LOGGER.logExceptionAsError(new IllegalStateException("Could not invoke the MSAL Broker. 
" + "Ensure that the azure-identity-broker library is on the classpath.", e)); } } TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl(enableCae) .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); builder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t)); } } PublicClientApplication publicClientApplication = builder.build(); if (tokenCache != null) { tokenCache.registerCache(); } return publicClientApplication; } ConfidentialClientApplication getManagedIdentityConfidentialClient() { String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; IClientCredential credential = ClientCredentialFactory .createFromSecret(clientSecret != null ? clientSecret : "dummy-secret"); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId == null ? 
"SYSTEM-ASSIGNED-MANAGED-IDENTITY" : clientId, credential); applicationBuilder .instanceDiscovery(false) .validateAuthority(false) .logPii(options.isUnsafeSupportLoggingEnabled()); try { applicationBuilder = applicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getManagedIdentityType() == null) { throw LOGGER.logExceptionAsError( new CredentialUnavailableException("Managed Identity type not configured, authentication not available.")); } applicationBuilder.appTokenProvider(appTokenProviderParameters -> { TokenRequestContext trc = new TokenRequestContext() .setScopes(new ArrayList<>(appTokenProviderParameters.scopes)) .setClaims(appTokenProviderParameters.claims) .setTenantId(appTokenProviderParameters.tenantId); Mono<AccessToken> accessTokenAsync = getTokenFromTargetManagedIdentity(trc); return accessTokenAsync.map(accessToken -> { TokenProviderResult result = new TokenProviderResult(); result.setAccessToken(accessToken.getToken()); result.setTenantId(trc.getTenantId()); result.setExpiresInSeconds(accessToken.getExpiresAt().toEpochSecond()); if (accessToken.getRefreshAt() != null) { result.setRefreshInSeconds(accessToken.getRefreshAt().toEpochSecond()); } return result; }).toFuture(); }); initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } return applicationBuilder.build(); } ManagedIdentityApplication getManagedIdentityMsalApplication() { ManagedIdentityId managedIdentityId = CoreUtils.isNullOrEmpty(clientId) ? (CoreUtils.isNullOrEmpty(resourceId) ? 
ManagedIdentityId.systemAssigned() : ManagedIdentityId.userAssignedResourceId(resourceId)) : ManagedIdentityId.userAssignedClientId(clientId); ManagedIdentityApplication.Builder miBuilder = ManagedIdentityApplication .builder(managedIdentityId) .logPii(options.isUnsafeSupportLoggingEnabled()); if ("DEFAULT_TO_IMDS".equals(String.valueOf(ManagedIdentityApplication.getManagedIdentitySource()))) { options.setUseImdsRetryStrategy(); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { miBuilder.httpClient(httpPipelineAdapter); } else { miBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { miBuilder.executorService(options.getExecutorService()); } return miBuilder.build(); } ConfidentialClientApplication getWorkloadIdentityConfidentialClient() { String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; IClientCredential credential = ClientCredentialFactory .createFromSecret(clientSecret != null ? clientSecret : "dummy-secret"); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId == null ? "SYSTEM-ASSIGNED-MANAGED-IDENTITY" : clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl) .logPii(options.isUnsafeSupportLoggingEnabled()) .instanceDiscovery(options.isInstanceDiscoveryEnabled()); if (!options.isInstanceDiscoveryEnabled()) { LOGGER.log(LogLevel.VERBOSE, () -> "Instance discovery and authority validation is disabled. In this" + " state, the library will not fetch metadata to validate the specified authority host. 
As a" + " result, it is crucial to ensure that the configured authority host is valid and trustworthy."); } } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } applicationBuilder.appTokenProvider(getWorkloadIdentityTokenProvider()); initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } return applicationBuilder.build(); } abstract Function<AppTokenProviderParameters, CompletableFuture<TokenProviderResult>> getWorkloadIdentityTokenProvider(); DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder buildDeviceCodeFlowParameters(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest claimsRequest = ClaimsRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(claimsRequest); } return parametersBuilder; } OnBehalfOfParameters buildOBOFlowParameters(TokenRequestContext request) { OnBehalfOfParameters.OnBehalfOfParametersBuilder builder = OnBehalfOfParameters .builder(new HashSet<>(request.getScopes()), options.getUserAssertion()) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)); if (request.isCaeEnabled() && request.getClaims() != null) { ClaimsRequest claimsRequest = ClaimsRequest.formatAsClaimsRequest(request.getClaims()); builder.claims(claimsRequest); } return 
builder.build(); } InteractiveRequestParameters.InteractiveRequestParametersBuilder buildInteractiveRequestParameters(TokenRequestContext request, String loginHint, URI redirectUri) { InteractiveRequestParameters.InteractiveRequestParametersBuilder builder = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .prompt(Prompt.SELECT_ACCOUNT) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.isCaeEnabled() && request.getClaims() != null) { ClaimsRequest claimsRequest = ClaimsRequest.formatAsClaimsRequest(request.getClaims()); builder.claims(claimsRequest); } BrowserCustomizationOptions browserCustomizationOptions = options.getBrowserCustomizationOptions(); if (IdentityUtil.browserCustomizationOptionsPresent(browserCustomizationOptions)) { SystemBrowserOptions.SystemBrowserOptionsBuilder browserOptionsBuilder = SystemBrowserOptions.builder(); if (!CoreUtils.isNullOrEmpty(browserCustomizationOptions.getSuccessMessage())) { browserOptionsBuilder.htmlMessageSuccess(browserCustomizationOptions.getSuccessMessage()); } if (!CoreUtils.isNullOrEmpty(browserCustomizationOptions.getErrorMessage())) { browserOptionsBuilder.htmlMessageError(browserCustomizationOptions.getErrorMessage()); } builder.systemBrowserOptions(browserOptionsBuilder.build()); } if (options.isBrokerEnabled()) { builder.windowHandle(options.getBrokerWindowHandle()); if (options.isMsaPassthroughEnabled()) { Map<String, String> extraQueryParameters = new HashMap<>(); extraQueryParameters.put("msal_request_type", "consumer_passthrough"); builder.extraQueryParameters(extraQueryParameters); } if (request instanceof PopTokenRequestContext && ((PopTokenRequestContext) request).isProofOfPossessionEnabled()) { PopTokenRequestContext requestContext = (PopTokenRequestContext) request; try { builder.proofOfPossession(mapToMsalHttpMethod(requestContext.getResourceRequestMethod()), requestContext.getResourceRequestUrl().toURI(), 
requestContext.getProofOfPossessionNonce()); } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } } } if (loginHint != null) { builder.loginHint(loginHint); } return builder; } static HttpMethod mapToMsalHttpMethod(String methodName) { if (HTTP_METHOD_HASH_MAP.containsKey(methodName)) { return HTTP_METHOD_HASH_MAP.get(methodName); } if (HTTP_METHOD_HASH_MAP.size() > 10) { HTTP_METHOD_HASH_MAP.clear(); } for (HttpMethod method : HttpMethod.values()) { if (method.methodName.equalsIgnoreCase(methodName)) { HTTP_METHOD_HASH_MAP.put(methodName, method); return method; } } throw new IllegalArgumentException("No enum constant with method name: " + methodName); } UserNamePasswordParameters.UserNamePasswordParametersBuilder buildUsernamePasswordFlowParameters(TokenRequestContext request, String username, String password) { UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder = UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()); if (request.isCaeEnabled() && request.getClaims() != null) { ClaimsRequest claimsRequest = ClaimsRequest .formatAsClaimsRequest(request.getClaims()); userNamePasswordParametersBuilder.claims(claimsRequest); } userNamePasswordParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); return userNamePasswordParametersBuilder; } AccessToken getTokenFromAzureCLIAuthentication(StringBuilder azCommand) { AccessToken token; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); builder.redirectInput(ProcessBuilder.Redirect.from(IdentityUtil.NULL_FILE)); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw 
LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (WINDOWS_PROCESS_ERROR_MESSAGE.matcher(line).find() || SH_PROCESS_ERROR_MESSAGE.matcher(line).find()) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed." + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(this.options.getCredentialProcessTimeout().getSeconds(), TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo(processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable." + " Please run 'az login' to set up account. 
To further mitigate this" + " issue, please refer to the troubleshooting guidelines here at " + "https: } throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure CLI ", null)); } } LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the" + " response into an Access Token."); try (JsonReader reader = JsonProviders.createReader(processOutput)) { AzureCliToken tokenHolder = AzureCliToken.fromJson(reader); String accessToken = tokenHolder.getAccessToken(); OffsetDateTime tokenExpiration = tokenHolder.getTokenExpiration(); token = new AccessToken(accessToken, tokenExpiration); } } catch (IOException | InterruptedException e) { IllegalStateException ex = new IllegalStateException(redactInfo(e.getMessage())); ex.setStackTrace(e.getStackTrace()); throw LOGGER.logExceptionAsError(ex); } return token; } AccessToken getTokenFromAzureDeveloperCLIAuthentication(StringBuilder azdCommand) { AccessToken token; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azdCommand.toString()); builder.redirectInput(ProcessBuilder.Redirect.from(IdentityUtil.NULL_FILE)); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw LOGGER.logExceptionAsError( new IllegalStateException( "A Safe Working directory could not be" + " found to execute Azure Developer CLI command from.")); } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8.name()))) 
{ String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (WINDOWS_PROCESS_ERROR_MESSAGE.matcher(line).find() || SH_PROCESS_ERROR_MESSAGE.matcher(line).find()) { throw LoggingUtil.logCredentialUnavailableException( LOGGER, options, new CredentialUnavailableException( "AzureDeveloperCliCredential authentication unavailable. Azure Developer CLI not installed." + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(this.options.getCredentialProcessTimeout().getSeconds(), TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo(processOutput); if (redactedOutput.contains("azd auth login") || redactedOutput.contains("not logged in")) { throw LoggingUtil.logCredentialUnavailableException( LOGGER, options, new CredentialUnavailableException( "AzureDeveloperCliCredential authentication unavailable." 
+ " Please run 'azd auth login' to set up account.")); } throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null)); } else { throw LOGGER.logExceptionAsError( new ClientAuthenticationException("Failed to invoke Azure Developer CLI ", null)); } } LOGGER.verbose( "Azure Developer CLI Authentication => A token response was received from Azure Developer CLI, deserializing the" + " response into an Access Token."); try (JsonReader reader = JsonProviders.createReader(processOutput)) { reader.nextToken(); Map<String, String> objectMap = reader.readMap(JsonReader::getString); String accessToken = objectMap.get("token"); String time = objectMap.get("expiresOn"); String standardTime = time.substring(0, time.indexOf("Z")); OffsetDateTime expiresOn = LocalDateTime .parse(standardTime, DateTimeFormatter.ISO_LOCAL_DATE_TIME) .atZone(ZoneId.of("Z")) .toOffsetDateTime() .withOffsetSameInstant(ZoneOffset.UTC); token = new AccessToken(accessToken, expiresOn); } } catch (IOException | InterruptedException e) { IllegalStateException ex = new IllegalStateException(redactInfo(e.getMessage())); ex.setStackTrace(e.getStackTrace()); throw LOGGER.logExceptionAsError(ex); } return token; } String getSafeWorkingDirectory() { if (isWindowsPlatform()) { String windowsSystemRoot = System.getenv("SystemRoot"); if (CoreUtils.isNullOrEmpty(windowsSystemRoot)) { return null; } return windowsSystemRoot + "\\system32"; } else { return DEFAULT_MAC_LINUX_PATH; } } String redactInfo(String input) { return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****"); } abstract Mono<AccessToken> getTokenFromTargetManagedIdentity(TokenRequestContext tokenRequestContext); HttpPipeline setupPipeline(HttpClient httpClient) { List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); Configuration buildConfiguration = 
Configuration.getGlobalConfiguration().clone(); HttpLogOptions httpLogOptions = (options.getHttpLogOptions() == null) ? new HttpLogOptions() : options.getHttpLogOptions(); ClientOptions localClientOptions = options.getClientOptions() != null ? options.getClientOptions() : DEFAULT_CLIENT_OPTIONS; userAgent = UserAgentUtil.toUserAgentString(CoreUtils.getApplicationId(localClientOptions, httpLogOptions), clientName, clientVersion, buildConfiguration); policies.add(new UserAgentPolicy(userAgent)); List<HttpHeader> httpHeaderList = new ArrayList<>(); localClientOptions.getHeaders().forEach(header -> httpHeaderList.add(new HttpHeader(header.getName(), header.getValue()))); policies.add(new AddHeadersPolicy(new HttpHeaders(httpHeaderList))); policies.addAll(options.getPerCallPolicies()); HttpPolicyProviders.addBeforeRetryPolicies(policies); RetryPolicy retryPolicy = options.getRetryPolicy(); if (retryPolicy == null && options.getUseImdsRetryStrategy()) { retryPolicy = new RetryPolicy(new ImdsRetryStrategy()); } policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, options.getRetryOptions())); policies.addAll(options.getPerRetryPolicies()); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder().httpClient(httpClient) .clientOptions(localClientOptions) .policies(policies.toArray(new HttpPipelinePolicy[0])).build(); } void initializeHttpPipelineAdapter() { if (options.getProxyOptions() == null) { httpPipelineAdapter = new HttpPipelineAdapter(getPipeline(), options); } } HttpPipeline getPipeline() { if (this.httpPipeline != null) { return httpPipeline; } HttpPipeline httpPipeline = options.getHttpPipeline(); if (httpPipeline != null) { this.httpPipeline = httpPipeline; return this.httpPipeline; } HttpClient httpClient = options.getHttpClient(); this.httpPipeline = setupPipeline(httpClient != null ? 
httpClient : HttpClient.createDefault()); return this.httpPipeline; } private byte[] getCertificateBytes() throws IOException { if (certificatePath != null) { return Files.readAllBytes(Paths.get(certificatePath)); } else if (certificate != null) { return certificate; } else { return new byte[0]; } } private InputStream getCertificateInputStream() throws IOException { if (certificatePath != null) { return new BufferedInputStream(new FileInputStream(certificatePath)); } else { return new ByteArrayInputStream(certificate); } } private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) { switch (options.getType()) { case SOCKS4: case SOCKS5: return new Proxy(Proxy.Type.SOCKS, options.getAddress()); case HTTP: default: return new Proxy(Proxy.Type.HTTP, options.getAddress()); } } static String urlEncode(String value) throws IOException { return URLEncoder.encode(value, StandardCharsets.UTF_8.name()); } static URL getUrl(String uri) throws MalformedURLException { return new URL(uri); } /** * Get the configured tenant id. * * @return the tenant id. */ public String getTenantId() { return tenantId; } /** * Get the configured client id. * * @return the client id. */ public String getClientId() { return clientId; } }
class IdentityClientBase { static final String WINDOWS_STARTER = "cmd.exe"; static final String LINUX_MAC_STARTER = "/bin/sh"; static final String WINDOWS_SWITCHER = "/c"; static final String LINUX_MAC_SWITCHER = "-c"; static final Pattern WINDOWS_PROCESS_ERROR_MESSAGE = Pattern.compile("'azd?' is not recognized"); static final Pattern SH_PROCESS_ERROR_MESSAGE = Pattern.compile("azd?:.*not found"); static final String DEFAULT_MAC_LINUX_PATH = "/bin/"; static final Duration REFRESH_OFFSET = Duration.ofMinutes(5); static final String IDENTITY_ENDPOINT_VERSION = "2019-08-01"; static final String MSI_ENDPOINT_VERSION = "2017-09-01"; static final String ARC_MANAGED_IDENTITY_ENDPOINT_API_VERSION = "2019-11-01"; static final String ADFS_TENANT = "adfs"; static final String HTTP_LOCALHOST = "http: static final String SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION = "2019-07-01-preview"; static final ClientLogger LOGGER = new ClientLogger(IdentityClient.class); static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("\"accessToken\": \"(.*?)(\"|$)"); static final Pattern TRAILING_FORWARD_SLASHES = Pattern.compile("/+$"); private static final String AZURE_IDENTITY_PROPERTIES = "azure-identity.properties"; private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final ClientOptions DEFAULT_CLIENT_OPTIONS = new ClientOptions(); private static final Map<String, HttpMethod> HTTP_METHOD_HASH_MAP = new HashMap<>(8); private final Map<String, String> properties = CoreUtils.getProperties(AZURE_IDENTITY_PROPERTIES); final IdentityClientOptions options; final String tenantId; final String clientId; final String resourceId; final String clientSecret; final String clientAssertionFilePath; final byte[] certificate; final String certificatePath; final Supplier<String> clientAssertionSupplier; final Function<HttpPipeline, String> clientAssertionSupplierWithHttpPipeline; final String certificatePassword; HttpPipelineAdapter 
httpPipelineAdapter; String userAgent = UserAgentUtil.DEFAULT_USER_AGENT_HEADER; private Class<?> interactiveBrowserBroker; private Method getMsalRuntimeBroker; HttpPipeline httpPipeline; /** * Creates an IdentityClient with the given options. * * @param tenantId the tenant ID of the application. * @param clientId the client ID of the application. * @param clientSecret the client secret of the application. * @param resourceId the resource ID of the application * @param certificatePath the path to the PKCS12 or PEM certificate of the application. * @param certificate the PKCS12 or PEM certificate of the application. * @param certificatePassword the password protecting the PFX certificate. * @param isSharedTokenCacheCredential Indicate whether the credential is * {@link com.azure.identity.SharedTokenCacheCredential} or not. * @param clientAssertionTimeout the timeout to use for the client assertion. * @param options the options configuring the client. */ IdentityClientBase(String tenantId, String clientId, String clientSecret, String certificatePath, String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier, Function<HttpPipeline, String> clientAssertionSupplierWithHttpPipeline, byte[] certificate, String certificatePassword, boolean isSharedTokenCacheCredential, Duration clientAssertionTimeout, IdentityClientOptions options) { if (tenantId == null) { tenantId = IdentityUtil.DEFAULT_TENANT; options.setAdditionallyAllowedTenants(Collections.singletonList(IdentityUtil.ALL_TENANTS)); } if (options == null) { options = new IdentityClientOptions(); } this.tenantId = tenantId; this.clientId = clientId; this.resourceId = resourceId; this.clientSecret = clientSecret; this.clientAssertionFilePath = clientAssertionFilePath; this.certificatePath = certificatePath; this.certificate = certificate; this.certificatePassword = certificatePassword; this.clientAssertionSupplier = clientAssertionSupplier; this.clientAssertionSupplierWithHttpPipeline = 
clientAssertionSupplierWithHttpPipeline; this.options = options; } ConfidentialClientApplication getConfidentialClient(boolean enableCae) { if (clientId == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; IClientCredential credential; if (clientSecret != null) { credential = ClientCredentialFactory.createFromSecret(clientSecret); } else if (certificate != null || certificatePath != null) { try { byte[] certificateBytes = getCertificateBytes(); if (CertificateUtil.isPem(certificateBytes)) { List<X509Certificate> x509CertificateList = CertificateUtil.publicKeyFromPem(certificateBytes); PrivateKey privateKey = CertificateUtil.privateKeyFromPem(certificateBytes); if (x509CertificateList.size() == 1) { credential = ClientCredentialFactory.createFromCertificate( privateKey, x509CertificateList.get(0)); } else { credential = ClientCredentialFactory.createFromCertificateChain( privateKey, x509CertificateList); } } else { try (InputStream pfxCertificateStream = getCertificateInputStream()) { credential = ClientCredentialFactory.createFromCertificate(pfxCertificateStream, certificatePassword); } } } catch (IOException | GeneralSecurityException e) { throw LOGGER.logExceptionAsError(new RuntimeException( "Failed to parse the certificate for the credential: " + e.getMessage(), e)); } } else if (clientAssertionSupplier != null) { credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get()); } else if (clientAssertionSupplierWithHttpPipeline != null) { credential = ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplierWithHttpPipeline.apply(getPipeline())); } else { throw LOGGER.logExceptionAsError( new IllegalArgumentException("Must provide client secret or client certificate path." 
+ " To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId, credential); try { applicationBuilder = applicationBuilder .logPii(options.isUnsafeSupportLoggingEnabled()) .authority(authorityUrl) .instanceDiscovery(options.isInstanceDiscoveryEnabled()); if (!options.isInstanceDiscoveryEnabled()) { LOGGER.log(LogLevel.VERBOSE, () -> "Instance discovery and authority validation is disabled. In this" + " state, the library will not fetch metadata to validate the specified authority host. As a" + " result, it is crucial to ensure that the configured authority host is valid and trustworthy."); } } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } if (enableCae) { Set<String> set = new HashSet<>(1); set.add("CP1"); applicationBuilder.clientCapabilities(set); } applicationBuilder.sendX5c(options.isIncludeX5c()); initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl(enableCae) .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); applicationBuilder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t)); } } if (options.getRegionalAuthority() != null) { if 
(options.getRegionalAuthority() == RegionalAuthority.AUTO_DISCOVER_REGION) { applicationBuilder.autoDetectRegion(true); } else { applicationBuilder.azureRegion(options.getRegionalAuthority().toString()); } } ConfidentialClientApplication confidentialClientApplication = applicationBuilder.build(); if (tokenCache != null) { tokenCache.registerCache(); } return confidentialClientApplication; } PublicClientApplication getPublicClient(boolean sharedTokenCacheCredential, boolean enableCae) { if (clientId == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "A non-null value for client ID must be provided for user authentication.")); } String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; PublicClientApplication.Builder builder = PublicClientApplication.builder(clientId); try { builder = builder .logPii(options.isUnsafeSupportLoggingEnabled()) .authority(authorityUrl).instanceDiscovery(options.isInstanceDiscoveryEnabled()); if (!options.isInstanceDiscoveryEnabled()) { LOGGER.log(LogLevel.VERBOSE, () -> "Instance discovery and authority validation is disabled. In this" + " state, the library will not fetch metadata to validate the specified authority host. 
As a" + " result, it is crucial to ensure that the configured authority host is valid and trustworthy."); } } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { builder.httpClient(httpPipelineAdapter); } else { builder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { builder.executorService(options.getExecutorService()); } if (enableCae) { Set<String> set = new HashSet<>(1); set.add("CP1"); builder.clientCapabilities(set); } if (options.isBrokerEnabled()) { if (interactiveBrowserBroker == null) { try { interactiveBrowserBroker = Class.forName("com.azure.identity.broker.implementation.InteractiveBrowserBroker"); } catch (ClassNotFoundException e) { throw LOGGER.logExceptionAsError(new IllegalStateException("Could not load the brokered authentication library. " + "Ensure that the azure-identity-broker library is on the classpath.", e)); } getMsalRuntimeBroker = null; try { getMsalRuntimeBroker = interactiveBrowserBroker.getMethod("getMsalRuntimeBroker"); } catch (NoSuchMethodException e) { throw LOGGER.logExceptionAsError(new IllegalStateException("Could not obtain the InteractiveBrowserBroker. " + "Ensure that the azure-identity-broker library is on the classpath.", e)); } } try { if (getMsalRuntimeBroker != null) { builder.broker((IBroker) getMsalRuntimeBroker.invoke(null)); } else { throw LOGGER.logExceptionAsError(new IllegalStateException("Could not obtain the MSAL Broker. " + "Ensure that the azure-identity-broker library is on the classpath.", null)); } } catch (InvocationTargetException | IllegalAccessException e) { throw LOGGER.logExceptionAsError(new IllegalStateException("Could not invoke the MSAL Broker. 
" + "Ensure that the azure-identity-broker library is on the classpath.", e)); } } TokenCachePersistenceOptions tokenCachePersistenceOptions = options.getTokenCacheOptions(); PersistentTokenCacheImpl tokenCache = null; if (tokenCachePersistenceOptions != null) { try { tokenCache = new PersistentTokenCacheImpl(enableCae) .setAllowUnencryptedStorage(tokenCachePersistenceOptions.isUnencryptedStorageAllowed()) .setName(tokenCachePersistenceOptions.getName()); builder.setTokenCacheAccessAspect(tokenCache); } catch (Throwable t) { throw LOGGER.logExceptionAsError(new ClientAuthenticationException( "Shared token cache is unavailable in this environment.", null, t)); } } PublicClientApplication publicClientApplication = builder.build(); if (tokenCache != null) { tokenCache.registerCache(); } return publicClientApplication; } ConfidentialClientApplication getManagedIdentityConfidentialClient() { String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; IClientCredential credential = ClientCredentialFactory .createFromSecret(clientSecret != null ? clientSecret : "dummy-secret"); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId == null ? 
"SYSTEM-ASSIGNED-MANAGED-IDENTITY" : clientId, credential); applicationBuilder .instanceDiscovery(false) .validateAuthority(false) .logPii(options.isUnsafeSupportLoggingEnabled()); try { applicationBuilder = applicationBuilder.authority(authorityUrl); } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } if (options.getManagedIdentityType() == null) { throw LOGGER.logExceptionAsError( new CredentialUnavailableException("Managed Identity type not configured, authentication not available.")); } applicationBuilder.appTokenProvider(appTokenProviderParameters -> { TokenRequestContext trc = new TokenRequestContext() .setScopes(new ArrayList<>(appTokenProviderParameters.scopes)) .setClaims(appTokenProviderParameters.claims) .setTenantId(appTokenProviderParameters.tenantId); Mono<AccessToken> accessTokenAsync = getTokenFromTargetManagedIdentity(trc); return accessTokenAsync.map(accessToken -> { TokenProviderResult result = new TokenProviderResult(); result.setAccessToken(accessToken.getToken()); result.setTenantId(trc.getTenantId()); result.setExpiresInSeconds(accessToken.getExpiresAt().toEpochSecond()); if (accessToken.getRefreshAt() != null) { result.setRefreshInSeconds(accessToken.getRefreshAt().toEpochSecond()); } return result; }).toFuture(); }); initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } return applicationBuilder.build(); } ManagedIdentityApplication getManagedIdentityMsalApplication() { ManagedIdentityId managedIdentityId = CoreUtils.isNullOrEmpty(clientId) ? (CoreUtils.isNullOrEmpty(resourceId) ? 
ManagedIdentityId.systemAssigned() : ManagedIdentityId.userAssignedResourceId(resourceId)) : ManagedIdentityId.userAssignedClientId(clientId); ManagedIdentityApplication.Builder miBuilder = ManagedIdentityApplication .builder(managedIdentityId) .logPii(options.isUnsafeSupportLoggingEnabled()); if ("DEFAULT_TO_IMDS".equals(String.valueOf(ManagedIdentityApplication.getManagedIdentitySource()))) { options.setUseImdsRetryStrategy(); } initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { miBuilder.httpClient(httpPipelineAdapter); } else { miBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { miBuilder.executorService(options.getExecutorService()); } return miBuilder.build(); } ConfidentialClientApplication getWorkloadIdentityConfidentialClient() { String authorityUrl = TRAILING_FORWARD_SLASHES.matcher(options.getAuthorityHost()).replaceAll("") + "/" + tenantId; IClientCredential credential = ClientCredentialFactory .createFromSecret(clientSecret != null ? clientSecret : "dummy-secret"); ConfidentialClientApplication.Builder applicationBuilder = ConfidentialClientApplication.builder(clientId == null ? "SYSTEM-ASSIGNED-MANAGED-IDENTITY" : clientId, credential); try { applicationBuilder = applicationBuilder.authority(authorityUrl) .logPii(options.isUnsafeSupportLoggingEnabled()) .instanceDiscovery(options.isInstanceDiscoveryEnabled()); if (!options.isInstanceDiscoveryEnabled()) { LOGGER.log(LogLevel.VERBOSE, () -> "Instance discovery and authority validation is disabled. In this" + " state, the library will not fetch metadata to validate the specified authority host. 
As a" + " result, it is crucial to ensure that the configured authority host is valid and trustworthy."); } } catch (MalformedURLException e) { throw LOGGER.logExceptionAsWarning(new IllegalStateException(e)); } applicationBuilder.appTokenProvider(getWorkloadIdentityTokenProvider()); initializeHttpPipelineAdapter(); if (httpPipelineAdapter != null) { applicationBuilder.httpClient(httpPipelineAdapter); } else { applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions())); } if (options.getExecutorService() != null) { applicationBuilder.executorService(options.getExecutorService()); } return applicationBuilder.build(); } abstract Function<AppTokenProviderParameters, CompletableFuture<TokenProviderResult>> getWorkloadIdentityTokenProvider(); DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder buildDeviceCodeFlowParameters(TokenRequestContext request, Consumer<DeviceCodeInfo> deviceCodeConsumer) { DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = DeviceCodeFlowParameters.builder( new HashSet<>(request.getScopes()), dc -> deviceCodeConsumer.accept( new DeviceCodeInfo(dc.userCode(), dc.deviceCode(), dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.getClaims() != null) { ClaimsRequest claimsRequest = ClaimsRequest.formatAsClaimsRequest(request.getClaims()); parametersBuilder.claims(claimsRequest); } return parametersBuilder; } OnBehalfOfParameters buildOBOFlowParameters(TokenRequestContext request) { OnBehalfOfParameters.OnBehalfOfParametersBuilder builder = OnBehalfOfParameters .builder(new HashSet<>(request.getScopes()), options.getUserAssertion()) .tenant(IdentityUtil.resolveTenantId(tenantId, request, options)); if (request.isCaeEnabled() && request.getClaims() != null) { ClaimsRequest claimsRequest = ClaimsRequest.formatAsClaimsRequest(request.getClaims()); builder.claims(claimsRequest); } return 
builder.build(); } InteractiveRequestParameters.InteractiveRequestParametersBuilder buildInteractiveRequestParameters(TokenRequestContext request, String loginHint, URI redirectUri) { InteractiveRequestParameters.InteractiveRequestParametersBuilder builder = InteractiveRequestParameters.builder(redirectUri) .scopes(new HashSet<>(request.getScopes())) .prompt(Prompt.SELECT_ACCOUNT) .tenant(IdentityUtil .resolveTenantId(tenantId, request, options)); if (request.isCaeEnabled() && request.getClaims() != null) { ClaimsRequest claimsRequest = ClaimsRequest.formatAsClaimsRequest(request.getClaims()); builder.claims(claimsRequest); } BrowserCustomizationOptions browserCustomizationOptions = options.getBrowserCustomizationOptions(); if (IdentityUtil.browserCustomizationOptionsPresent(browserCustomizationOptions)) { SystemBrowserOptions.SystemBrowserOptionsBuilder browserOptionsBuilder = SystemBrowserOptions.builder(); if (!CoreUtils.isNullOrEmpty(browserCustomizationOptions.getSuccessMessage())) { browserOptionsBuilder.htmlMessageSuccess(browserCustomizationOptions.getSuccessMessage()); } if (!CoreUtils.isNullOrEmpty(browserCustomizationOptions.getErrorMessage())) { browserOptionsBuilder.htmlMessageError(browserCustomizationOptions.getErrorMessage()); } builder.systemBrowserOptions(browserOptionsBuilder.build()); } if (options.isBrokerEnabled()) { builder.windowHandle(options.getBrokerWindowHandle()); if (options.isMsaPassthroughEnabled()) { Map<String, String> extraQueryParameters = new HashMap<>(); extraQueryParameters.put("msal_request_type", "consumer_passthrough"); builder.extraQueryParameters(extraQueryParameters); } if (request instanceof PopTokenRequestContext && ((PopTokenRequestContext) request).isProofOfPossessionEnabled()) { PopTokenRequestContext requestContext = (PopTokenRequestContext) request; try { builder.proofOfPossession(mapToMsalHttpMethod(requestContext.getResourceRequestMethod()), requestContext.getResourceRequestUrl().toURI(), 
requestContext.getProofOfPossessionNonce()); } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } } } if (loginHint != null) { builder.loginHint(loginHint); } return builder; } static HttpMethod mapToMsalHttpMethod(String methodName) { if (HTTP_METHOD_HASH_MAP.containsKey(methodName)) { return HTTP_METHOD_HASH_MAP.get(methodName); } if (HTTP_METHOD_HASH_MAP.size() > 10) { HTTP_METHOD_HASH_MAP.clear(); } for (HttpMethod method : HttpMethod.values()) { if (method.methodName.equalsIgnoreCase(methodName)) { HTTP_METHOD_HASH_MAP.put(methodName, method); return method; } } throw new IllegalArgumentException("No enum constant with method name: " + methodName); } UserNamePasswordParameters.UserNamePasswordParametersBuilder buildUsernamePasswordFlowParameters(TokenRequestContext request, String username, String password) { UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder = UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray()); if (request.isCaeEnabled() && request.getClaims() != null) { ClaimsRequest claimsRequest = ClaimsRequest .formatAsClaimsRequest(request.getClaims()); userNamePasswordParametersBuilder.claims(claimsRequest); } userNamePasswordParametersBuilder.tenant( IdentityUtil.resolveTenantId(tenantId, request, options)); return userNamePasswordParametersBuilder; } AccessToken getTokenFromAzureCLIAuthentication(StringBuilder azCommand) { AccessToken token; try { String starter; String switcher; if (isWindowsPlatform()) { starter = WINDOWS_STARTER; switcher = WINDOWS_SWITCHER; } else { starter = LINUX_MAC_STARTER; switcher = LINUX_MAC_SWITCHER; } ProcessBuilder builder = new ProcessBuilder(starter, switcher, azCommand.toString()); builder.redirectInput(ProcessBuilder.Redirect.from(IdentityUtil.NULL_FILE)); String workingDirectory = getSafeWorkingDirectory(); if (workingDirectory != null) { builder.directory(new File(workingDirectory)); } else { throw 
LOGGER.logExceptionAsError(new IllegalStateException("A Safe Working directory could not be" + " found to execute CLI command from. To mitigate this issue, please refer to the troubleshooting " + " guidelines here at https: } builder.redirectErrorStream(true); Process process = builder.start(); StringBuilder output = new StringBuilder(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) { String line; while (true) { line = reader.readLine(); if (line == null) { break; } if (WINDOWS_PROCESS_ERROR_MESSAGE.matcher(line).find() || SH_PROCESS_ERROR_MESSAGE.matcher(line).find()) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable. Azure CLI not installed." + "To mitigate this issue, please refer to the troubleshooting guidelines here at " + "https: } output.append(line); } } String processOutput = output.toString(); process.waitFor(this.options.getCredentialProcessTimeout().getSeconds(), TimeUnit.SECONDS); if (process.exitValue() != 0) { if (processOutput.length() > 0) { String redactedOutput = redactInfo(processOutput); if (redactedOutput.contains("az login") || redactedOutput.contains("az account set")) { throw LoggingUtil.logCredentialUnavailableException(LOGGER, options, new CredentialUnavailableException( "AzureCliCredential authentication unavailable." + " Please run 'az login' to set up account. 
// NOTE(review): this chunk opens mid-method — the start of the Azure CLI token method and the
// enclosing class header are outside this view. Several "https:" string literals below appear
// truncated in this extract (the troubleshooting URL and its closing quote/parens are missing).
To further mitigate this" + " issue, please refer to the troubleshooting guidelines here at " + "https:
                }
                // CLI ran but failed for some other reason: surface its (token-redacted) output.
                throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null));
            } else {
                // CLI produced no output at all.
                throw LOGGER.logExceptionAsError(
                    new ClientAuthenticationException("Failed to invoke Azure CLI ", null));
            }
        }
        LOGGER.verbose("Azure CLI Authentication => A token response was received from Azure CLI, deserializing the"
            + " response into an Access Token.");
        // Deserialize the CLI's JSON payload into an AccessToken.
        try (JsonReader reader = JsonProviders.createReader(processOutput)) {
            AzureCliToken tokenHolder = AzureCliToken.fromJson(reader);
            String accessToken = tokenHolder.getAccessToken();
            OffsetDateTime tokenExpiration = tokenHolder.getTokenExpiration();
            token = new AccessToken(accessToken, tokenExpiration);
        }
    } catch (IOException | InterruptedException e) {
        // Redact any token material before propagating.
        // NOTE(review): InterruptedException is swallowed into an IllegalStateException without
        // re-interrupting the thread (Thread.currentThread().interrupt()) — confirm this is intended.
        IllegalStateException ex = new IllegalStateException(redactInfo(e.getMessage()));
        ex.setStackTrace(e.getStackTrace());
        throw LOGGER.logExceptionAsError(ex);
    }
    return token;
}

/**
 * Runs the Azure Developer CLI ("azd") command and parses its JSON output into an access token.
 *
 * @param azdCommand the fully formed azd command line to execute
 * @return the access token parsed from the CLI's output
 */
AccessToken getTokenFromAzureDeveloperCLIAuthentication(StringBuilder azdCommand) {
    AccessToken token;
    try {
        // Pick the shell launcher and flag for the current platform.
        String starter;
        String switcher;
        if (isWindowsPlatform()) {
            starter = WINDOWS_STARTER;
            switcher = WINDOWS_SWITCHER;
        } else {
            starter = LINUX_MAC_STARTER;
            switcher = LINUX_MAC_SWITCHER;
        }
        ProcessBuilder builder = new ProcessBuilder(starter, switcher, azdCommand.toString());
        // Detach stdin so the CLI can never block waiting for interactive input.
        builder.redirectInput(ProcessBuilder.Redirect.from(IdentityUtil.NULL_FILE));
        String workingDirectory = getSafeWorkingDirectory();
        if (workingDirectory != null) {
            builder.directory(new File(workingDirectory));
        } else {
            throw LOGGER.logExceptionAsError(
                new IllegalStateException(
                    "A Safe Working directory could not be"
                        + " found to execute Azure Developer CLI command from."));
        }
        // Merge stderr into stdout so a single reader sees everything the CLI prints.
        builder.redirectErrorStream(true);
        Process process = builder.start();
        StringBuilder output = new StringBuilder();
        try (BufferedReader reader = new BufferedReader(
            new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8.name())))
        {
            String line;
            while (true) {
                line = reader.readLine();
                if (line == null) {
                    break;
                }
                // "command not found"-style output means azd is not installed on this machine.
                // NOTE(review): the URL string literal below is truncated in this extract.
                if (WINDOWS_PROCESS_ERROR_MESSAGE.matcher(line).find()
                    || SH_PROCESS_ERROR_MESSAGE.matcher(line).find()) {
                    throw LoggingUtil.logCredentialUnavailableException(
                        LOGGER, options,
                        new CredentialUnavailableException(
                            "AzureDeveloperCliCredential authentication unavailable. Azure Developer CLI not installed."
                                + "To mitigate this issue, please refer to the troubleshooting guidelines here at "
                                + "https:
                }
                output.append(line);
            }
        }
        String processOutput = output.toString();
        // Bound how long we wait for azd to exit.
        // NOTE(review): waitFor's boolean result (timed out vs. exited) is ignored; exitValue()
        // below throws IllegalThreadStateException if the process is still running — confirm intended.
        process.waitFor(this.options.getCredentialProcessTimeout().getSeconds(), TimeUnit.SECONDS);
        if (process.exitValue() != 0) {
            if (processOutput.length() > 0) {
                String redactedOutput = redactInfo(processOutput);
                // "azd auth login" / "not logged in" in the output means no account is set up.
                if (redactedOutput.contains("azd auth login") || redactedOutput.contains("not logged in")) {
                    throw LoggingUtil.logCredentialUnavailableException(
                        LOGGER, options,
                        new CredentialUnavailableException(
                            "AzureDeveloperCliCredential authentication unavailable."
                                + " Please run 'azd auth login' to set up account."));
                }
                throw LOGGER.logExceptionAsError(new ClientAuthenticationException(redactedOutput, null));
            } else {
                throw LOGGER.logExceptionAsError(
                    new ClientAuthenticationException("Failed to invoke Azure Developer CLI ", null));
            }
        }
        LOGGER.verbose(
            "Azure Developer CLI Authentication => A token response was received from Azure Developer CLI, deserializing the"
                + " response into an Access Token.");
        try (JsonReader reader = JsonProviders.createReader(processOutput)) {
            reader.nextToken();
            Map<String, String> objectMap = reader.readMap(JsonReader::getString);
            String accessToken = objectMap.get("token");
            String time = objectMap.get("expiresOn");
            // azd emits e.g. "2024-01-01T00:00:00Z": strip the 'Z' suffix, parse as a local
            // date-time, then re-attach UTC explicitly.
            String standardTime = time.substring(0, time.indexOf("Z"));
            OffsetDateTime expiresOn = LocalDateTime
                .parse(standardTime, DateTimeFormatter.ISO_LOCAL_DATE_TIME)
                .atZone(ZoneId.of("Z"))
                .toOffsetDateTime()
                .withOffsetSameInstant(ZoneOffset.UTC);
            token = new AccessToken(accessToken, expiresOn);
        }
    } catch (IOException | InterruptedException e) {
        // NOTE(review): interrupt status not restored here either — see note above.
        IllegalStateException ex = new IllegalStateException(redactInfo(e.getMessage()));
        ex.setStackTrace(e.getStackTrace());
        throw LOGGER.logExceptionAsError(ex);
    }
    return token;
}

/**
 * Returns a directory that is safe to launch CLI processes from, or {@code null} when none can
 * be determined (Windows with no {@code SystemRoot} environment variable).
 */
String getSafeWorkingDirectory() {
    if (isWindowsPlatform()) {
        String windowsSystemRoot = System.getenv("SystemRoot");
        if (CoreUtils.isNullOrEmpty(windowsSystemRoot)) {
            return null;
        }
        return windowsSystemRoot + "\\system32";
    } else {
        return DEFAULT_MAC_LINUX_PATH;
    }
}

/** Masks anything matching the access-token pattern before text is logged or rethrown. */
String redactInfo(String input) {
    return ACCESS_TOKEN_PATTERN.matcher(input).replaceAll("****");
}

abstract Mono<AccessToken> getTokenFromTargetManagedIdentity(TokenRequestContext tokenRequestContext);

/**
 * Assembles the HTTP pipeline (user agent, custom headers, retry, logging policies) used for
 * token requests.
 *
 * @param httpClient the transport to attach; never null here (callers supply a default)
 * @return the built pipeline
 */
HttpPipeline setupPipeline(HttpClient httpClient) {
    List<HttpPipelinePolicy> policies = new ArrayList<>();
    String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
    String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
    Configuration buildConfiguration =
        Configuration.getGlobalConfiguration().clone();
    HttpLogOptions httpLogOptions =
        (options.getHttpLogOptions() == null) ? new HttpLogOptions() : options.getHttpLogOptions();
    ClientOptions localClientOptions =
        options.getClientOptions() != null ? options.getClientOptions() : DEFAULT_CLIENT_OPTIONS;
    userAgent = UserAgentUtil.toUserAgentString(CoreUtils.getApplicationId(localClientOptions, httpLogOptions),
        clientName, clientVersion, buildConfiguration);
    policies.add(new UserAgentPolicy(userAgent));
    List<HttpHeader> httpHeaderList = new ArrayList<>();
    localClientOptions.getHeaders().forEach(header ->
        httpHeaderList.add(new HttpHeader(header.getName(), header.getValue())));
    policies.add(new AddHeadersPolicy(new HttpHeaders(httpHeaderList)));
    // Per-call policies run once per request; per-retry policies run on every retry attempt.
    policies.addAll(options.getPerCallPolicies());
    HttpPolicyProviders.addBeforeRetryPolicies(policies);
    RetryPolicy retryPolicy = options.getRetryPolicy();
    if (retryPolicy == null && options.getUseImdsRetryStrategy()) {
        // IMDS endpoints get a dedicated retry strategy unless the caller supplied a policy.
        retryPolicy = new RetryPolicy(new ImdsRetryStrategy());
    }
    policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, options.getRetryOptions()));
    policies.addAll(options.getPerRetryPolicies());
    HttpPolicyProviders.addAfterRetryPolicies(policies);
    policies.add(new HttpLoggingPolicy(httpLogOptions));
    return new HttpPipelineBuilder().httpClient(httpClient)
        .clientOptions(localClientOptions)
        .policies(policies.toArray(new HttpPipelinePolicy[0])).build();
}

/** Wraps the pipeline in an adapter, but only when no proxy is configured. */
void initializeHttpPipelineAdapter() {
    if (options.getProxyOptions() == null) {
        httpPipelineAdapter = new HttpPipelineAdapter(getPipeline(), options);
    }
}

/**
 * Resolves the pipeline lazily: cached instance first, then a caller-supplied pipeline from
 * options, then a newly built one. Caching is done without synchronization.
 */
HttpPipeline getPipeline() {
    if (this.httpPipeline != null) {
        return httpPipeline;
    }
    HttpPipeline httpPipeline = options.getHttpPipeline();
    if (httpPipeline != null) {
        this.httpPipeline = httpPipeline;
        return this.httpPipeline;
    }
    HttpClient httpClient = options.getHttpClient();
    this.httpPipeline = setupPipeline(httpClient != null ?
        httpClient : HttpClient.createDefault());
    return this.httpPipeline;
}

/** Reads the client certificate: from the configured path, else in-memory bytes, else empty. */
private byte[] getCertificateBytes() throws IOException {
    if (certificatePath != null) {
        return Files.readAllBytes(Paths.get(certificatePath));
    } else if (certificate != null) {
        return certificate;
    } else {
        return new byte[0];
    }
}

/** Streams the client certificate from disk or from the in-memory byte array. */
private InputStream getCertificateInputStream() throws IOException {
    if (certificatePath != null) {
        return new BufferedInputStream(new FileInputStream(certificatePath));
    } else {
        return new ByteArrayInputStream(certificate);
    }
}

/** Converts SDK proxy options into a {@code java.net.Proxy}; unknown types fall back to HTTP. */
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
    switch (options.getType()) {
        case SOCKS4:
        case SOCKS5:
            return new Proxy(Proxy.Type.SOCKS, options.getAddress());
        case HTTP:
        default:
            return new Proxy(Proxy.Type.HTTP, options.getAddress());
    }
}

/** URL-encodes a query/form value as UTF-8. */
static String urlEncode(String value) throws IOException {
    return URLEncoder.encode(value, StandardCharsets.UTF_8.name());
}

/** Parses a string into a {@code URL}; throws {@code MalformedURLException} on bad input. */
static URL getUrl(String uri) throws MalformedURLException {
    return new URL(uri);
}

/**
 * Get the configured tenant id.
 *
 * @return the tenant id.
 */
public String getTenantId() {
    return tenantId;
}

/**
 * Get the configured client id.
 *
 * @return the client id.
 */
public String getClientId() {
    return clientId;
}
}
array_generate can also be called with a single argument; for example, array_generate(3) returns [1, 2, 3].
/**
 * Resolves a function call: special-cases a handful of builtins (count distinct, exchange
 * metrics, time/date_slice, decimal-aware functions, str_to_date, the array_* family,
 * array_generate), falls back to a builtin lookup and then to UDF lookup, and finally
 * validates argument-to-parameter castability before attaching the resolved Function and
 * its return type to the node.
 */
public Void visitFunctionCall(FunctionCallExpr node, Scope scope) {
    Type[] argumentTypes = node.getChildren().stream().map(Expr::getType).toArray(Type[]::new);
    // Non-deterministic builtins (e.g. random-like functions) get a unique id so repeated
    // occurrences are not collapsed together later.
    if (node.isNondeterministicBuiltinFnName()) {
        ExprId exprId = analyzeState.getNextNondeterministicId();
        node.setNondeterministicId(exprId);
    }
    Function fn;
    String fnName = node.getFnName().getFunction();
    if (fnName.equals(FunctionSet.COUNT) && node.getParams().isDistinct()) {
        // count(distinct a, b, ...): resolve against the first argument's type only.
        fn = Expr.getBuiltinFunction(FunctionSet.COUNT, new Type[] {argumentTypes[0]},
            Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
    } else if (fnName.equals(FunctionSet.EXCHANGE_BYTES) || fnName.equals(FunctionSet.EXCHANGE_SPEED)) {
        fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
        fn.setArgsType(argumentTypes); // as accepting various types
        fn.setIsNullable(false);
    } else if (fnName.equals(FunctionSet.TIME_SLICE) || fnName.equals(FunctionSet.DATE_SLICE)) {
        // The interval argument must be a positive integer constant.
        if (!(node.getChild(1) instanceof IntLiteral)) {
            throw new SemanticException(
                fnName + " requires second parameter must be a constant interval");
        }
        if (((IntLiteral) node.getChild(1)).getValue() <= 0) {
            throw new SemanticException(
                fnName + " requires second parameter must be greater than 0");
        }
        fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
    } else if (FunctionSet.decimalRoundFunctions.contains(fnName) ||
            Arrays.stream(argumentTypes).anyMatch(Type::isDecimalV3)) {
        // Decimal-aware path: variance-family functions are computed over DOUBLE; everything
        // else goes through the decimal-v3 resolution helper.
        if (FunctionSet.varianceFunctions.contains(fnName)) {
            Type[] doubleArgTypes = Stream.of(argumentTypes).map(t -> Type.DOUBLE).toArray(Type[]::new);
            fn = Expr.getBuiltinFunction(fnName, doubleArgTypes,
                Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
        } else {
            fn = getDecimalV3Function(node, argumentTypes);
        }
    } else if (Arrays.stream(argumentTypes).anyMatch(arg -> arg.matchesType(Type.TIME))) {
        fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
        // TIME arguments are rejected for aggregate functions only.
        if (fn instanceof AggregateFunction) {
            throw new SemanticException("Time Type can not used in %s function", fnName);
        }
    } else if (FunctionSet.STR_TO_DATE.equals(fnName)) {
        // May rewrite to str2date when the constant format has no time part.
        fn = getStrToDateFunction(node, argumentTypes);
    } else if (fnName.equals(FunctionSet.ARRAY_FILTER)) {
        // array_filter(array, array<bool>-castable): second input is coerced to ARRAY<BOOLEAN>.
        if (node.getChildren().size() != 2) {
            throw new SemanticException(fnName + " should have 2 array inputs or lambda functions.");
        }
        if (!node.getChild(0).getType().isArrayType() && !node.getChild(0).getType().isNull()) {
            throw new SemanticException("The first input of " + fnName +
                " should be an array or a lambda function.");
        }
        if (!node.getChild(1).getType().isArrayType() && !node.getChild(1).getType().isNull()) {
            throw new SemanticException("The second input of " + fnName +
                " should be an array or a lambda function.");
        }
        if (!Type.canCastTo(node.getChild(1).getType(), Type.ARRAY_BOOLEAN)) {
            throw new SemanticException("The second input of array_filter " +
                node.getChild(1).getType().toString() + "  can't cast to ARRAY<BOOL>");
        }
        node.setChild(1, new CastExpr(Type.ARRAY_BOOLEAN, node.getChild(1)));
        argumentTypes[1] = Type.ARRAY_BOOLEAN;
        fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
    } else if (fnName.equals(FunctionSet.ARRAY_SORTBY)) {
        if (node.getChildren().size() != 2) {
            throw new SemanticException(fnName + " should have 2 array inputs or lambda functions.");
        }
        if (!node.getChild(0).getType().isArrayType() && !node.getChild(0).getType().isNull()) {
            throw new SemanticException("The first input of " + fnName +
                " should be an array or a lambda function.");
        }
        if (!node.getChild(1).getType().isArrayType() && !node.getChild(1).getType().isNull()) {
            throw new SemanticException("The second input of " + fnName +
                " should be an array or a lambda function.");
        }
        fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
    } else if (fnName.equals(FunctionSet.ARRAY_SLICE)) {
        // Offsets/lengths are normalized to BIGINT before resolution.
        for (int i = 1; i < argumentTypes.length; i++) {
            argumentTypes[i] = Type.BIGINT;
        }
        fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_SUPERTYPE_OF);
    } else if (fnName.equals(FunctionSet.ARRAY_CONCAT)) {
        if (node.getChildren().size() < 2) {
            throw new SemanticException(fnName + " should have at least two inputs");
        }
        fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
    } else if (fnName.equals("array_generate")) {
        // array_generate([start,] stop [, step]) — literal/column integer arguments only.
        if (node.getChildren().size() < 1 || node.getChildren().size() > 3) {
            throw new SemanticException(fnName + " has wrong input numbers");
        }
        for (Expr expr : node.getChildren()) {
            // Column arguments are only allowed when all three parameters are explicit.
            if ((expr instanceof SlotRef) && node.getChildren().size() != 3) {
                throw new SemanticException(fnName + " with IntColumn doesn't support default parameters");
            }
            if (!(expr instanceof IntLiteral) && !(expr instanceof LargeIntLiteral) &&
                    !(expr instanceof SlotRef) && !(expr instanceof NullLiteral)) {
                throw new SemanticException(fnName + "'s parameter only support Integer");
            }
        }
        // One argument: treat it as the stop value, defaulting start to 1.
        // (SlotRef is already excluded above for the 1-arg form, so the cast is safe.)
        if (node.getChildren().size() == 1) {
            LiteralExpr secondParam = (LiteralExpr) node.getChild(0);
            node.clearChildren();
            try {
                node.addChild(new IntLiteral("1", Type.TINYINT));
                node.addChild(secondParam);
            } catch (AnalysisException e) {
                throw new SemanticException(e.getMessage());
            }
        }
        // Two arguments (possibly after the default above): derive step = ±1 from direction.
        if (node.getChildren().size() == 2) {
            int idx = 0;
            BigInteger[] childValues = new BigInteger[2];
            for (Expr expr : node.getChildren()) {
                if (expr instanceof NullLiteral) {
                    throw new SemanticException(fnName + "'s parameter only support Integer");
                } else if (expr instanceof IntLiteral) {
                    childValues[idx++] = BigInteger.valueOf(((IntLiteral) expr).getValue());
                } else {
                    childValues[idx++] = ((LargeIntLiteral) expr).getValue();
                }
            }
            if (childValues[0].compareTo(childValues[1]) < 0) {
                node.addChild(new IntLiteral(1));
            } else {
                node.addChild(new IntLiteral(-1));
            }
        }
        // Children may have been rewritten above — recompute the argument types.
        argumentTypes = node.getChildren().stream().map(Expr::getType).toArray(Type[]::new);
        fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_SUPERTYPE_OF);
    } else {
        fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
    }
    // Builtin lookup failed: try user-defined functions.
    if (fn == null) {
        fn = AnalyzerUtils.getUdfFunction(session, node.getFnName(), argumentTypes);
    }
    if (fn == null) {
        throw new SemanticException("No matching function with signature: %s(%s).", fnName,
            node.getParams().isStar() ? "*" : Joiner.on(", ")
                .join(Arrays.stream(argumentTypes).map(Type::toSql).collect(Collectors.toList())));
    }
    if (fn instanceof TableFunction) {
        throw unsupportedException("Table function cannot be used in expression");
    }
    // Final check: every argument must match or be castable to the resolved parameter type.
    for (int i = 0; i < fn.getNumArgs(); i++) {
        if (!argumentTypes[i].matchesType(fn.getArgs()[i]) &&
                !Type.canCastToAsFunctionParameter(argumentTypes[i], fn.getArgs()[i])) {
            throw new SemanticException("No matching function with signature: %s(%s).", fnName,
                node.getParams().isStar() ? "*" : Joiner.on(", ")
                    .join(Arrays.stream(argumentTypes).map(Type::toSql).collect(Collectors.toList())));
        }
    }
    node.setFn(fn);
    node.setType(fn.getReturnType());
    FunctionAnalyzer.analyze(node);
    return null;
}
if (node.getChildren().size() < 1 || node.getChildren().size() > 3) {
/**
 * Refactored function-call resolution: per-function argument validation is delegated to
 * checkFunction(), decimal handling to DecimalV3FunctionAnalyzer, and array_generate
 * normalization to getArrayGenerateFunction(). After resolution (builtin, then UDF), fixed
 * arguments and trailing variadic arguments are checked for castability before the resolved
 * Function and return type are attached to the node.
 */
public Void visitFunctionCall(FunctionCallExpr node, Scope scope) {
    Type[] argumentTypes = node.getChildren().stream().map(Expr::getType).toArray(Type[]::new);
    // Tag non-deterministic builtins with a unique id so repeated calls stay distinct.
    if (node.isNondeterministicBuiltinFnName()) {
        ExprId exprId = analyzeState.getNextNondeterministicId();
        node.setNondeterministicId(exprId);
    }
    Function fn;
    String fnName = node.getFnName().getFunction();

    // Per-function argument validation (extracted from the former inline checks).
    checkFunction(fnName, node);

    if (fnName.equals(FunctionSet.COUNT) && node.getParams().isDistinct()) {
        // count(distinct ...): resolve against the first argument's type only.
        fn = Expr.getBuiltinFunction(FunctionSet.COUNT, new Type[] {argumentTypes[0]},
            Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
    } else if (fnName.equals(FunctionSet.EXCHANGE_BYTES) || fnName.equals(FunctionSet.EXCHANGE_SPEED)) {
        fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
        fn.setArgsType(argumentTypes); // as accepting various types
        fn.setIsNullable(false);
    } else if (DecimalV3FunctionAnalyzer.argumentTypeContainDecimalV3(fnName, argumentTypes)) {
        // Decimal-v3 resolution is fully delegated now.
        fn = DecimalV3FunctionAnalyzer.getDecimalV3Function(session, node, argumentTypes);
    } else if (Arrays.stream(argumentTypes).anyMatch(arg -> arg.matchesType(Type.TIME))) {
        fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
        if (fn instanceof AggregateFunction) {
            // NOTE(review): message is missing a space before the function name
            // ("...can not used inFOO function") — runtime string kept as-is here.
            throw new SemanticException("Time Type can not used in" + fnName + " function", node.getPos());
        }
    } else if (FunctionSet.STR_TO_DATE.equals(fnName)) {
        fn = getStrToDateFunction(node, argumentTypes);
    } else if (FunctionSet.ARRAY_GENERATE.equals(fnName)) {
        // Helper may rewrite the children (default start/step) — recompute types afterwards.
        fn = getArrayGenerateFunction(node);
        argumentTypes = node.getChildren().stream().map(Expr::getType).toArray(Type[]::new);
    } else {
        fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
    }

    // Builtin lookup failed: fall back to user-defined functions.
    if (fn == null) {
        fn = AnalyzerUtils.getUdfFunction(session, node.getFnName(), argumentTypes);
    }

    if (fn == null) {
        String msg = String.format("No matching function with signature: %s(%s)", fnName,
            node.getParams().isStar() ? "*" : Joiner.on(", ")
                .join(Arrays.stream(argumentTypes).map(Type::toSql).collect(Collectors.toList())));
        throw new SemanticException(msg, node.getPos());
    }

    if (fn instanceof TableFunction) {
        throw new SemanticException("Table function cannot be used in expression", node.getPos());
    }

    // Fixed parameters: each argument must match or be castable to the declared type.
    for (int i = 0; i < fn.getNumArgs(); i++) {
        if (!argumentTypes[i].matchesType(fn.getArgs()[i]) &&
                !Type.canCastToAsFunctionParameter(argumentTypes[i], fn.getArgs()[i])) {
            String msg = String.format("No matching function with signature: %s(%s)", fnName,
                node.getParams().isStar() ? "*" :
                    Arrays.stream(argumentTypes).map(Type::toSql).collect(Collectors.joining(", ")));
            throw new SemanticException(msg, node.getPos());
        }
    }

    // Variadic tail: every extra argument must be compatible with the last declared type.
    if (fn.hasVarArgs()) {
        Type varType = fn.getArgs()[fn.getNumArgs() - 1];
        for (int i = fn.getNumArgs(); i < argumentTypes.length; i++) {
            if (!argumentTypes[i].matchesType(varType) &&
                    !Type.canCastToAsFunctionParameter(argumentTypes[i], varType)) {
                String msg = String.format("Variadic function %s(%s) can't support type: %s", fnName,
                    Arrays.stream(fn.getArgs()).map(Type::toSql).collect(Collectors.joining(", ")),
                    argumentTypes[i]);
                throw new SemanticException(msg, node.getPos());
            }
        }
    }

    node.setFn(fn);
    node.setType(fn.getReturnType());
    FunctionAnalyzer.analyze(node);
    return null;
}
/**
 * Expression-analysis visitor: walks an expression AST, resolves slot references against the
 * given Scope, and assigns a concrete Type (and, for calls, a Function) to every node.
 * NOTE(review): the class body continues past the end of this chunk — it is not closed here.
 */
class Visitor extends AstVisitor<Void, Scope> {
    private static final List<String> ADD_DATE_FUNCTIONS = Lists.newArrayList(FunctionSet.DATE_ADD,
        FunctionSet.ADDDATE, FunctionSet.DAYS_ADD, FunctionSet.TIMESTAMPADD);
    private static final List<String> SUB_DATE_FUNCTIONS = Lists.newArrayList(FunctionSet.DATE_SUB,
        FunctionSet.SUBDATE, FunctionSet.DAYS_SUB);
    // Accumulates column references and non-deterministic ids discovered during analysis.
    private final AnalyzeState analyzeState;
    private final ConnectContext session;

    public Visitor(AnalyzeState analyzeState, ConnectContext session) {
        this.analyzeState = analyzeState;
        this.session = session;
    }

    @Override
    public Void visitExpression(Expr node, Scope scope) {
        // Fallback for node kinds with no dedicated visit method.
        throw unsupportedException("not yet implemented: expression analyzer for " + node.getClass().getName());
    }

    // Records the slot-to-field binding on the shared analyze state.
    private void handleResolvedField(SlotRef slot, ResolvedField resolvedField) {
        analyzeState.addColumnReference(slot, FieldId.from(resolvedField));
    }

    @Override
    public Void visitSubfieldExpr(SubfieldExpr node, Scope scope) {
        Expr child = node.getChild(0);
        Preconditions.checkArgument(child.getType().isStructType(),
            String.format("%s must be a struct type, check if you are using `'`", child.toSql()));
        List<String> fieldNames = node.getFieldNames();
        Type tmpType = child.getType();
        // Walk the chain of struct field names, narrowing the type at each step.
        for (String fieldName : fieldNames) {
            StructType structType = (StructType) tmpType;
            StructField structField = structType.getField(fieldName);
            if (structField == null) {
                throw new SemanticException("Struct subfield '%s' cannot be resolved", fieldName);
            }
            tmpType = structField.getType();
        }
        node.setType(tmpType);
        return null;
    }

    @Override
    public Void visitSlot(SlotRef node, Scope scope) {
        ResolvedField resolvedField = scope.resolveField(node);
        node.setType(resolvedField.getField().getType());
        node.setTblName(resolvedField.getField().getRelationAlias());
        // struct type can include the subfield, so if type is struct,
        // we carry the field name and any pruned-subfield positions onto the slot.
        if (node.getType().isStructType()) {
            node.setCol(resolvedField.getField().getName());
            node.setLabel(resolvedField.getField().getName());
            if (resolvedField.getField().getTmpUsedStructFieldPos().size() > 0) {
                node.setUsedStructFieldPos(resolvedField.getField().getTmpUsedStructFieldPos());
                node.resetStructInfo();
            }
        }
        handleResolvedField(node, resolvedField);
        return null;
    }

    @Override
    public Void visitFieldReference(FieldReference node, Scope scope) {
        Field field = scope.getRelationFields().getFieldByIndex(node.getFieldIndex());
        node.setType(field.getType());
        return null;
    }

    @Override
    public Void visitArrayExpr(ArrayExpr node, Scope scope) {
        if (!node.getChildren().isEmpty()) {
            try {
                // Element type: declared item type if present, otherwise the common supertype
                // of all elements; elements are cast up to it as needed.
                Type targetItemType;
                if (node.getType() != null) {
                    targetItemType = ((ArrayType) node.getType()).getItemType();
                } else {
                    targetItemType = TypeManager.getCommonSuperType(
                        node.getChildren().stream().map(Expr::getType).collect(Collectors.toList()));
                }
                for (int i = 0; i < node.getChildren().size(); i++) {
                    if (!node.getChildren().get(i).getType().matchesType(targetItemType)) {
                        node.castChild(targetItemType, i);
                    }
                }
                node.setType(new ArrayType(targetItemType));
            } catch (AnalysisException e) {
                throw new SemanticException(e.getMessage());
            }
        } else {
            // Empty literal array: element type is unknown.
            node.setType(Type.ARRAY_NULL);
        }
        return null;
    }

    @Override
    public Void visitCollectionElementExpr(CollectionElementExpr node, Scope scope) {
        Expr expr = node.getChild(0);
        Expr subscript = node.getChild(1);
        if (!expr.getType().isArrayType() && !expr.getType().isMapType()) {
            throw new SemanticException("cannot subscript type " + expr.getType()
                + " because it is not an array or a map");
        }
        if (expr.getType().isArrayType()) {
            // Array access: integer subscript, normalized to INT.
            if (!subscript.getType().isNumericType()) {
                throw new SemanticException("array subscript must have type integer");
            }
            try {
                if (subscript.getType().getPrimitiveType() != PrimitiveType.INT) {
                    node.castChild(Type.INT, 1);
                }
                node.setType(((ArrayType) expr.getType()).getItemType());
            } catch (AnalysisException e) {
                throw new SemanticException(e.getMessage());
            }
        } else {
            // Map access: subscript is cast to the map's key type.
            try {
                if (subscript.getType().getPrimitiveType() !=
                        ((MapType) expr.getType()).getKeyType().getPrimitiveType()) {
                    node.castChild(((MapType) expr.getType()).getKeyType(), 1);
                }
                node.setType(((MapType) expr.getType()).getValueType());
            } catch (AnalysisException e) {
                throw new SemanticException(e.getMessage());
            }
        }
        return null;
    }

    @Override
    public Void visitArraySliceExpr(ArraySliceExpr node, Scope scope) {
        if (!node.getChild(0).getType().isArrayType()) {
            throw new SemanticException("cannot subscript type" + node.getChild(0).getType()
                + " because it is not an array");
        }
        // Slicing preserves the array type.
        node.setType(node.getChild(0).getType());
        return null;
    }

    @Override
    public Void visitArrowExpr(ArrowExpr node, Scope scope) {
        Expr item = node.getChild(0);
        Expr key = node.getChild(1);
        // json_col -> 'path': path must be a string literal, receiver must be JSON.
        if (!key.isLiteral() || !key.getType().isStringType()) {
            throw new SemanticException("right operand of -> should be string literal, but got " + key);
        }
        if (!item.getType().isJsonType()) {
            throw new SemanticException(
                "-> operator could only be used for json column, but got " + item.getType());
        }
        node.setType(Type.JSON);
        return null;
    }

    @Override
    public Void visitLambdaFunctionExpr(LambdaFunctionExpr node, Scope scope) {
        // Child 0 is the lambda body; children 1..n are the lambda arguments, which are bound
        // to the enclosing higher-order function's input arrays (scope.getLambdaInputs()).
        if (scope.getLambdaInputs().size() == 0) {
            throw new SemanticException("Lambda Functions can only be used in high-order functions with arrays.");
        }
        if (scope.getLambdaInputs().size() != node.getChildren().size() - 1) {
            throw new SemanticException("Lambda arguments should equal to lambda input arrays.");
        }
        Set<String> set = new HashSet<>();
        List<LambdaArgument> args = Lists.newArrayList();
        for (int i = 1; i < node.getChildren().size(); ++i) {
            args.add((LambdaArgument) node.getChild(i));
            String name = ((LambdaArgument) node.getChild(i)).getName();
            if (set.contains(name)) {
                throw new SemanticException("Lambda argument: " + name + " is duplicated.");
            }
            set.add(name);
            // Propagate nullability and element type from the matching input array.
            ((LambdaArgument) node.getChild(i)).setNullable(scope.getLambdaInputs().get(i - 1).isNullable());
            node.getChild(i).setType(scope.getLambdaInputs().get(i - 1).getType());
        }
        // Analyze the lambda body in a child scope containing the lambda arguments.
        Scope lambdaScope = new Scope(args, scope);
        ExpressionAnalyzer.analyzeExpression(node.getChild(0), this.analyzeState, lambdaScope, this.session);
node.setType(Type.FUNCTION); scope.clearLambdaInputs(); return null; } @Override public Void visitCompoundPredicate(CompoundPredicate node, Scope scope) { for (int i = 0; i < node.getChildren().size(); i++) { Type type = node.getChild(i).getType(); if (!type.isBoolean() && !type.isNull()) { throw new SemanticException("Operand '%s' part of predicate " + "'%s' should return type 'BOOLEAN' but returns type '%s'.", AstToStringBuilder.toString(node), AstToStringBuilder.toString(node.getChild(i)), type.toSql()); } } node.setType(Type.BOOLEAN); return null; } @Override public Void visitBetweenPredicate(BetweenPredicate node, Scope scope) { predicateBaseAndCheck(node); List<Type> list = node.getChildren().stream().map(Expr::getType).collect(Collectors.toList()); Type compatibleType = TypeManager.getCompatibleTypeForBetweenAndIn(list); for (Type type : list) { if (!Type.canCastTo(type, compatibleType)) { throw new SemanticException( "between predicate type " + type.toSql() + " with type " + compatibleType.toSql() + " is invalid."); } } return null; } @Override public Void visitBinaryPredicate(BinaryPredicate node, Scope scope) { Type type1 = node.getChild(0).getType(); Type type2 = node.getChild(1).getType(); Type compatibleType = TypeManager.getCompatibleTypeForBinary(node.getOp().isNotRangeComparison(), type1, type2); final String ERROR_MSG = "Column type %s does not support binary predicate operation."; if (!Type.canCastTo(type1, compatibleType)) { throw new SemanticException(String.format(ERROR_MSG, type1.toSql())); } if (!Type.canCastTo(type2, compatibleType)) { throw new SemanticException(String.format(ERROR_MSG, type1.toSql())); } node.setType(Type.BOOLEAN); return null; } @Override public Void visitArithmeticExpr(ArithmeticExpr node, Scope scope) { if (node.getOp().getPos() == ArithmeticExpr.OperatorPosition.BINARY_INFIX) { ArithmeticExpr.Operator op = node.getOp(); Type t1 = node.getChild(0).getType().getNumResultType(); Type t2 = 
node.getChild(1).getType().getNumResultType(); if (t1.isDecimalV3() || t2.isDecimalV3()) { try { node.rewriteDecimalOperation(); } catch (AnalysisException ex) { throw new SemanticException(ex.getMessage()); } Type lhsType = node.getChild(0).getType(); Type rhsType = node.getChild(1).getType(); Type resultType = node.getType(); Type[] args = {lhsType, rhsType}; Function fn = Expr.getBuiltinFunction(op.getName(), args, Function.CompareMode.IS_IDENTICAL); Function newFn = new ScalarFunction(fn.getFunctionName(), args, resultType, fn.hasVarArgs()); node.setType(resultType); node.setFn(newFn); return null; } Type lhsType; Type rhsType; switch (op) { case MULTIPLY: case ADD: case SUBTRACT: lhsType = ArithmeticExpr.getBiggerType(ArithmeticExpr.getCommonType(t1, t2)); rhsType = lhsType; break; case MOD: lhsType = ArithmeticExpr.getCommonType(t1, t2); rhsType = lhsType; break; case DIVIDE: lhsType = ArithmeticExpr.getCommonType(t1, t2); if (lhsType.isFixedPointType()) { lhsType = Type.DOUBLE; } rhsType = lhsType; break; case INT_DIVIDE: case BITAND: case BITOR: case BITXOR: lhsType = ArithmeticExpr.getCommonType(t1, t2); if (!lhsType.isFixedPointType()) { lhsType = Type.BIGINT; } rhsType = lhsType; break; case BIT_SHIFT_LEFT: case BIT_SHIFT_RIGHT: case BIT_SHIFT_RIGHT_LOGICAL: lhsType = t1; rhsType = Type.BIGINT; break; default: throw unsupportedException("Unknown arithmetic operation " + op + " in: " + node); } if (node.getChild(0).getType().equals(Type.NULL) && node.getChild(1).getType().equals(Type.NULL)) { lhsType = Type.NULL; rhsType = Type.NULL; } if (!Type.NULL.equals(node.getChild(0).getType()) && !Type.canCastTo(t1, lhsType)) { throw new SemanticException( "cast type " + node.getChild(0).getType().toSql() + " with type " + lhsType.toSql() + " is invalid."); } if (!Type.NULL.equals(node.getChild(1).getType()) && !Type.canCastTo(t2, rhsType)) { throw new SemanticException( "cast type " + node.getChild(1).getType().toSql() + " with type " + rhsType.toSql() + " is 
invalid."); } Function fn = Expr.getBuiltinFunction(op.getName(), new Type[] {lhsType, rhsType}, Function.CompareMode.IS_SUPERTYPE_OF); /* * commonType is the common type of the parameters of the function, * and fn.getReturnType() is the return type of the function after execution * So we use fn.getReturnType() as node type */ node.setType(fn.getReturnType()); node.setFn(fn); } else if (node.getOp().getPos() == ArithmeticExpr.OperatorPosition.UNARY_PREFIX) { Function fn = Expr.getBuiltinFunction( node.getOp().getName(), new Type[] {Type.BIGINT}, Function.CompareMode.IS_SUPERTYPE_OF); node.setType(Type.BIGINT); node.setFn(fn); } else if (node.getOp().getPos() == ArithmeticExpr.OperatorPosition.UNARY_POSTFIX) { throw unsupportedException("not yet implemented: expression analyzer for " + node.getClass().getName()); } else { throw unsupportedException("not yet implemented: expression analyzer for " + node.getClass().getName()); } return null; } List<String> addDateFunctions = Lists.newArrayList(FunctionSet.DATE_ADD, FunctionSet.ADDDATE, FunctionSet.DAYS_ADD, FunctionSet.TIMESTAMPADD); List<String> subDateFunctions = Lists.newArrayList(FunctionSet.DATE_SUB, FunctionSet.SUBDATE, FunctionSet.DAYS_SUB); @Override public Void visitTimestampArithmeticExpr(TimestampArithmeticExpr node, Scope scope) { node.setChild(0, TypeManager.addCastExpr(node.getChild(0), Type.DATETIME)); String funcOpName; if (node.getFuncName() != null) { if (ADD_DATE_FUNCTIONS.contains(node.getFuncName())) { funcOpName = String.format("%sS_%s", node.getTimeUnitIdent(), "add"); } else if (SUB_DATE_FUNCTIONS.contains(node.getFuncName())) { funcOpName = String.format("%sS_%s", node.getTimeUnitIdent(), "sub"); } else { node.setChild(1, TypeManager.addCastExpr(node.getChild(1), Type.DATETIME)); funcOpName = String.format("%sS_%s", node.getTimeUnitIdent(), "diff"); } } else { funcOpName = String.format("%sS_%s", node.getTimeUnitIdent(), (node.getOp() == ArithmeticExpr.Operator.ADD) ? 
                    "add" : "sub");
        }
        Type[] argumentTypes = node.getChildren().stream().map(Expr::getType)
            .toArray(Type[]::new);
        Function fn = Expr.getBuiltinFunction(funcOpName.toLowerCase(), argumentTypes,
            Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
        if (fn == null) {
            throw new SemanticException("No matching function with signature: %s(%s).", funcOpName,
                Joiner.on(", ")
                    .join(Arrays.stream(argumentTypes).map(Type::toSql).collect(Collectors.toList())));
        }
        node.setType(fn.getReturnType());
        node.setFn(fn);
        return null;
    }

    @Override
    public Void visitExistsPredicate(ExistsPredicate node, Scope scope) {
        predicateBaseAndCheck(node);
        return null;
    }

    /**
     * IN predicate: at most one subquery operand; all operands must be castable to a common
     * comparison type, and JSON operands are rejected.
     */
    @Override
    public Void visitInPredicate(InPredicate node, Scope scope) {
        predicateBaseAndCheck(node);
        List<Expr> queryExpressions = Lists.newArrayList();
        node.collect(arg -> arg instanceof Subquery, queryExpressions);
        if (queryExpressions.size() > 0 && node.getChildren().size() > 2) {
            throw new SemanticException("In Predicate only support literal expression list");
        }
        List<Type> list = node.getChildren().stream().map(Expr::getType).collect(Collectors.toList());
        Type compatibleType = TypeManager.getCompatibleTypeForBetweenAndIn(list);
        for (Type type : list) {
            if (type.isJsonType()) {
                throw new SemanticException("InPredicate of JSON is not supported");
            }
            if (!Type.canCastTo(type, compatibleType)) {
                throw new SemanticException(
                    "in predicate type " + type.toSql() + " with type " + compatibleType.toSql()
                        + " is invalid.");
            }
        }
        return null;
    }

    /**
     * (a, b, ...) IN (subquery): left columns are children [0, numberOfColumns); the subquery
     * is the next child. Column counts must match, complex types are rejected, and each left
     * type must be castable to the matching subquery output type.
     */
    @Override
    public Void visitMultiInPredicate(MultiInPredicate node, Scope scope) {
        predicateBaseAndCheck(node);
        List<Type> leftTypes =
            node.getChildren().stream().limit(node.getNumberOfColumns()).map(Expr::getType)
                .collect(Collectors.toList());
        Subquery inSubquery = (Subquery) node.getChild(node.getNumberOfColumns());
        List<Type> rightTypes =
            inSubquery.getQueryStatement().getQueryRelation().getOutputExpression().stream().map(Expr::getType).
                collect(Collectors.toList());
        if (leftTypes.size() != rightTypes.size()) {
            throw new SemanticException(
                "subquery must return the same number of columns as provided by the IN predicate");
        }
        for (int i = 0; i < rightTypes.size(); ++i) {
            if (leftTypes.get(i).isJsonType() || rightTypes.get(i).isJsonType() || leftTypes.get(i).isMapType() ||
                    rightTypes.get(i).isMapType() || leftTypes.get(i).isStructType() ||
                    rightTypes.get(i).isStructType()) {
                throw new SemanticException("InPredicate of JSON, Map, Struct types is not supported");
            }
            if (!Type.canCastTo(leftTypes.get(i), rightTypes.get(i))) {
                throw new SemanticException(
                    "in predicate type " + leftTypes.get(i).toSql() + " with type " + rightTypes.get(i).toSql()
                        + " is invalid.");
            }
        }
        return null;
    }

    /** Literals need no resolution; only LARGEINT values are range-checked here. */
    @Override
    public Void visitLiteral(LiteralExpr node, Scope scope) {
        if (node instanceof LargeIntLiteral) {
            BigInteger value = ((LargeIntLiteral) node).getValue();
            if (value.compareTo(LargeIntLiteral.LARGE_INT_MIN) < 0 ||
                    value.compareTo(LargeIntLiteral.LARGE_INT_MAX) > 0) {
                throw new SemanticException("Number Overflow. literal: " + value);
            }
        }
        return null;
    }

    @Override
    public Void visitIsNullPredicate(IsNullPredicate node, Scope scope) {
        predicateBaseAndCheck(node);
        return null;
    }

    /**
     * LIKE/REGEXP: both operands must be strings (or NULL); a literal REGEXP pattern is
     * compiled eagerly so syntax errors surface at analysis time.
     */
    @Override
    public Void visitLikePredicate(LikePredicate node, Scope scope) {
        predicateBaseAndCheck(node);
        Type type1 = node.getChild(0).getType();
        Type type2 = node.getChild(1).getType();
        if (!type1.isStringType() && !type1.isNull()) {
            throw new SemanticException(
                "left operand of " + node.getOp().toString() + " must be of type STRING: "
                    + AstToStringBuilder.toString(node));
        }
        if (!type2.isStringType() && !type2.isNull()) {
            throw new SemanticException(
                "right operand of " + node.getOp().toString() + " must be of type STRING: "
                    + AstToStringBuilder.toString(node));
        }
        // Pattern is validated only if it's a constant.
        if (LikePredicate.Operator.REGEXP.equals(node.getOp()) && !type2.isNull() && node.getChild(1).isLiteral()) {
            try {
                Pattern.compile(((StringLiteral) node.getChild(1)).getValue());
            } catch (PatternSyntaxException e) {
                throw new SemanticException(
                    "Invalid regular expression in '" + AstToStringBuilder.toString(node) + "'");
            }
        }
        return null;
    }

    // Common predicate setup: result type BOOLEAN; metric (HLL/BITMAP/PERCENTILE) and complex
    // (ARRAY/MAP/STRUCT) operands are rejected, except complex types under IS [NOT] NULL.
    private void predicateBaseAndCheck(Predicate node) {
        node.setType(Type.BOOLEAN);
        for (Expr expr : node.getChildren()) {
            if (expr.getType().isOnlyMetricType() ||
                    (expr.getType().isComplexType() && !(node instanceof IsNullPredicate))) {
                throw new SemanticException(
                    "HLL, BITMAP, PERCENTILE and ARRAY, MAP, STRUCT type couldn't as Predicate");
            }
        }
    }

    @Override
    public Void visitCastExpr(CastExpr cast, Scope context) {
        Type castType;
        // Implicit casts already carry their target type; explicit casts read the type def.
        if (cast.isImplicit()) {
            castType = cast.getType();
        } else {
            castType = cast.getTargetTypeDef().getType();
        }
        if (!Type.canCastTo(cast.getChild(0).getType(), castType)) {
            throw new SemanticException("Invalid type cast from " + cast.getChild(0).getType().toSql() + " to "
                + castType.toSql() + " in sql `"
                + AstToStringBuilder.toString(cast.getChild(0)).replace("%", "%%") + "`");
        }
        cast.setType(castType);
        return null;
    }

    // NOTE(review): @Override on a private non-overriding helper will not compile — it looks
    // like a leftover from a removed/moved visit method; confirm and remove.
    @Override
    private Function getStrToDateFunction(FunctionCallExpr node, Type[] argumentTypes) {
        /*
         * @TODO: Determine the return type of this function
         * If is format is constant and don't contains time part, return date type, to compatible with mysql.
         * In fact we don't want to support str_to_date return date like mysql, reason:
         * 1. The return type of FE/BE str_to_date function signature is datetime, return date
         *    let type different, it's will throw unpredictable error
         * 2. Support return date and datetime at same time in one function is complicated.
         * 3. The meaning of the function is confusing. In mysql, will return date if format is a constant
         *    string and it's not contains "%H/%M/%S" pattern, but it's a trick logic, if format is a variable
         *    expression, like: str_to_date(col1, col2), and the col2 is '%Y%m%d', the result always be
         *    datetime.
         */
        Function fn = Expr.getBuiltinFunction(node.getFnName().getFunction(), argumentTypes,
            Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
        if (fn == null) {
            return null;
        }
        if (!node.getChild(1).isConstant()) {
            return fn;
        }
        // Constant format: translate it to a scalar operator and, when it has no time part,
        // rewrite the call to str2date (DATE result) for MySQL compatibility.
        ExpressionMapping expressionMapping =
            new ExpressionMapping(new Scope(RelationId.anonymous(), new RelationFields()),
                com.google.common.collect.Lists.newArrayList());
        ScalarOperator format = SqlToScalarOperatorTranslator.translate(node.getChild(1), expressionMapping,
            new ColumnRefFactory());
        if (format.isConstantRef() && !HAS_TIME_PART.matcher(format.toString()).matches()) {
            return Expr.getBuiltinFunction("str2date", argumentTypes,
                Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
        }
        return fn;
    }

    /**
     * Decimal-v3 function resolution: normalizes argument types to a common decimal type,
     * resolves the builtin (or UDF), then rectifies aggregate signatures or synthesizes a
     * ScalarFunction/AggregateFunction with decimal-adjusted argument and return types.
     * NOTE(review): this method continues past the end of this chunk — its tail is not visible.
     */
    Function getDecimalV3Function(FunctionCallExpr node, Type[] argumentTypes) {
        Function fn;
        String fnName = node.getFnName().getFunction();
        Type commonType = DecimalV3FunctionAnalyzer.normalizeDecimalArgTypes(argumentTypes, fnName);
        fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
        if (fn == null) {
            fn = AnalyzerUtils.getUdfFunction(session, node.getFnName(), argumentTypes);
        }
        if (fn == null) {
            throw new SemanticException("No matching function with signature: %s(%s).", fnName,
                node.getParams().isStar() ? "*" : Joiner.on(", ")
                    .join(Arrays.stream(argumentTypes).map(Type::toSql).collect(Collectors.toList())));
        }
        if (DecimalV3FunctionAnalyzer.DECIMAL_AGG_FUNCTION.contains(fnName)) {
            Type argType = node.getChild(0).getType();
            // stddev/variance on decimal compute on a fixed DECIMAL128(38,9) argument.
            if (DecimalV3FunctionAnalyzer.DECIMAL_AGG_VARIANCE_STDDEV_TYPE
                    .contains(fnName) && argType.isDecimalV3()) {
                argType = ScalarType.createDecimalV3Type(PrimitiveType.DECIMAL128, 38, 9);
                node.setChild(0, TypeManager.addCastExpr(node.getChild(0), argType));
            }
            fn = DecimalV3FunctionAnalyzer
                .rectifyAggregationFunction((AggregateFunction) fn, argType, commonType);
        } else if (DecimalV3FunctionAnalyzer.DECIMAL_UNARY_FUNCTION_SET.contains(fnName) ||
                DecimalV3FunctionAnalyzer.DECIMAL_IDENTICAL_TYPE_FUNCTION_SET.contains(fnName) ||
                FunctionSet.IF.equals(fnName) || FunctionSet.MAX_BY.equals(fnName)) {
            // Replace decimal parameter slots with the normalized common type.
            List<Type> argTypes;
            if (FunctionSet.MONEY_FORMAT.equals(fnName)) {
                argTypes = Arrays.asList(argumentTypes);
            } else {
                argTypes = Arrays.stream(fn.getArgs()).map(t -> t.isDecimalV3() ? commonType : t)
                    .collect(Collectors.toList());
            }
            Type returnType = fn.getReturnType();
            if (returnType.isDecimalV3() && commonType.isValid()) {
                returnType = commonType;
            }
            if (FunctionSet.MAX_BY.equals(fnName)) {
                // max_by needs a rebuilt AggregateFunction carrying the adjusted return type.
                AggregateFunction newFn = new AggregateFunction(fn.getFunctionName(), Arrays.asList(argumentTypes),
                    returnType, Type.VARCHAR, fn.hasVarArgs());
                newFn.setFunctionId(fn.getFunctionId());
                newFn.setChecksum(fn.getChecksum());
                newFn.setBinaryType(fn.getBinaryType());
                newFn.setHasVarArgs(fn.hasVarArgs());
                newFn.setId(fn.getId());
                newFn.setUserVisible(fn.isUserVisible());
                newFn.setisAnalyticFn(true);
                fn = newFn;
                return fn;
            }
            // Scalar case: clone the builtin with decimal-adjusted arg/return types.
            ScalarFunction newFn = new ScalarFunction(fn.getFunctionName(), argTypes, returnType,
                fn.getLocation(), ((ScalarFunction) fn).getSymbolName(),
                ((ScalarFunction) fn).getPrepareFnSymbol(), ((ScalarFunction) fn).getCloseFnSymbol());
            newFn.setFunctionId(fn.getFunctionId());
            newFn.setChecksum(fn.getChecksum());
            newFn.setBinaryType(fn.getBinaryType());
            newFn.setHasVarArgs(fn.hasVarArgs());
            newFn.setId(fn.getId());
            newFn.setUserVisible(fn.isUserVisible());
            fn = newFn;
        } else if (FunctionSet.decimalRoundFunctions.contains(fnName)) {
            List<Type> argTypes = Arrays.stream(fn.getArgs()).map(t -> t.isDecimalV3() ?
commonType : t) .collect(Collectors.toList()); fn = DecimalV3FunctionAnalyzer.getFunctionOfRound(node, fn, argTypes); } return fn; } @Override public Void visitGroupingFunctionCall(GroupingFunctionCallExpr node, Scope scope) { if (node.getChildren().size() < 1) { throw new SemanticException("GROUPING functions required at least one parameters"); } if (node.getChildren().stream().anyMatch(e -> !(e instanceof SlotRef))) { throw new SemanticException("grouping functions only support column."); } Type[] childTypes = new Type[1]; childTypes[0] = Type.BIGINT; Function fn = Expr.getBuiltinFunction(node.getFnName().getFunction(), childTypes, Function.CompareMode.IS_IDENTICAL); node.setFn(fn); node.setType(fn.getReturnType()); return null; } @Override public Void visitCaseWhenExpr(CaseExpr node, Scope context) { int start = 0; int end = node.getChildren().size(); Expr caseExpr = null; Expr elseExpr = null; if (node.hasCaseExpr()) { caseExpr = node.getChild(0); start++; } if (node.hasElseExpr()) { elseExpr = node.getChild(end - 1); end--; } if (node.getChildren().stream().anyMatch(d -> !d.getType().isScalarType())) { throw new SemanticException("case-when only support scalar type"); } List<Type> whenTypes = Lists.newArrayList(); if (null != caseExpr) { whenTypes.add(caseExpr.getType()); } for (int i = start; i < end; i = i + 2) { whenTypes.add(node.getChild(i).getType()); } Type compatibleType = Type.NULL; if (null != caseExpr) { compatibleType = TypeManager.getCompatibleTypeForCaseWhen(whenTypes); } for (Type type : whenTypes) { if (!Type.canCastTo(type, compatibleType)) { throw new SemanticException("Invalid when type cast " + type.toSql() + " to " + compatibleType.toSql()); } } List<Type> thenTypes = Lists.newArrayList(); for (int i = start + 1; i < end; i = i + 2) { thenTypes.add(node.getChild(i).getType()); } if (null != elseExpr) { thenTypes.add(elseExpr.getType()); } Type returnType = thenTypes.stream().allMatch(Type.NULL::equals) ? 
Type.BOOLEAN : TypeManager.getCompatibleTypeForCaseWhen(thenTypes); for (Type type : thenTypes) { if (!Type.canCastTo(type, returnType)) { throw new SemanticException("Invalid then type cast " + type.toSql() + " to " + returnType.toSql()); } } node.setType(returnType); return null; } @Override public Void visitSubquery(Subquery node, Scope context) { QueryAnalyzer queryAnalyzer = new QueryAnalyzer(session); queryAnalyzer.analyze(node.getQueryStatement(), context); node.setType(node.getQueryStatement().getQueryRelation().getRelationFields().getFieldByIndex(0).getType()); return null; } @Override public Void visitAnalyticExpr(AnalyticExpr node, Scope context) { visit(node.getFnCall(), context); node.setType(node.getFnCall().getType()); if (node.getWindow() != null) { if (node.getWindow().getLeftBoundary() != null && node.getWindow().getLeftBoundary().getExpr() != null) { visit(node.getWindow().getLeftBoundary().getExpr(), context); } if (node.getWindow().getRightBoundary() != null && node.getWindow().getRightBoundary().getExpr() != null) { visit(node.getWindow().getRightBoundary().getExpr(), context); } } node.getPartitionExprs().forEach(e -> visit(e, context)); node.getOrderByElements().stream().map(OrderByElement::getExpr).forEach(e -> visit(e, context)); verifyAnalyticExpression(node); return null; } @Override public Void visitInformationFunction(InformationFunction node, Scope context) { String funcType = node.getFuncType(); if (funcType.equalsIgnoreCase("DATABASE") || funcType.equalsIgnoreCase("SCHEMA")) { node.setType(Type.VARCHAR); node.setStrValue(ClusterNamespace.getNameFromFullName(session.getDatabase())); } else if (funcType.equalsIgnoreCase("USER")) { node.setType(Type.VARCHAR); node.setStrValue(session.getUserIdentity().toString()); } else if (funcType.equalsIgnoreCase("CURRENT_USER")) { node.setType(Type.VARCHAR); node.setStrValue(session.getCurrentUserIdentity().toString()); } else if (funcType.equalsIgnoreCase("CURRENT_ROLE")) { 
node.setType(Type.VARCHAR); PrivilegeManager manager = session.getGlobalStateMgr().getPrivilegeManager(); List<String> roleName = new ArrayList<>(); try { for (Long roleId : session.getCurrentRoleIds()) { RolePrivilegeCollection rolePrivilegeCollection = manager.getRolePrivilegeCollectionUnlocked(roleId, true); roleName.add(rolePrivilegeCollection.getName()); } } catch (PrivilegeException e) { throw new SemanticException(e.getMessage()); } if (roleName.isEmpty()) { node.setStrValue("NONE"); } else { node.setStrValue(Joiner.on(", ").join(roleName)); } } else if (funcType.equalsIgnoreCase("CONNECTION_ID")) { node.setType(Type.BIGINT); node.setIntValue(session.getConnectionId()); node.setStrValue(""); } return null; } @Override public Void visitVariableExpr(VariableExpr node, Scope context) { try { if (node.getSetType().equals(SetType.USER)) { UserVariable userVariable = session.getUserVariables(node.getName()); if (userVariable == null) { node.setType(Type.STRING); node.setIsNull(); return null; } Type variableType = userVariable.getEvaluatedExpression().getType(); node.setType(variableType); if (userVariable.getEvaluatedExpression() instanceof NullLiteral) { node.setIsNull(); } else { node.setValue(userVariable.getEvaluatedExpression().getRealObjectValue()); } } else { VariableMgr.fillValue(session.getSessionVariable(), node); if (!Strings.isNullOrEmpty(node.getName()) && node.getName().equalsIgnoreCase(SessionVariable.SQL_MODE)) { node.setType(Type.VARCHAR); node.setValue(SqlModeHelper.decode((long) node.getValue())); } } } catch (AnalysisException | DdlException e) { throw new SemanticException(e.getMessage()); } return null; } @Override public Void visitDefaultValueExpr(DefaultValueExpr node, Scope context) { node.setType(Type.VARCHAR); return null; } @Override public Void visitCloneExpr(CloneExpr node, Scope context) { return null; } }
class Visitor extends AstVisitor<Void, Scope> { private static final List<String> ADD_DATE_FUNCTIONS = Lists.newArrayList(FunctionSet.DATE_ADD, FunctionSet.ADDDATE, FunctionSet.DAYS_ADD, FunctionSet.TIMESTAMPADD); private static final List<String> SUB_DATE_FUNCTIONS = Lists.newArrayList(FunctionSet.DATE_SUB, FunctionSet.SUBDATE, FunctionSet.DAYS_SUB); private final AnalyzeState analyzeState; private final ConnectContext session; public Visitor(AnalyzeState analyzeState, ConnectContext session) { this.analyzeState = analyzeState; this.session = session; } @Override public Void visitExpression(Expr node, Scope scope) { throw new SemanticException("not yet implemented: expression analyzer for " + node.getClass().getName(), node.getPos()); } private void handleResolvedField(SlotRef slot, ResolvedField resolvedField) { analyzeState.addColumnReference(slot, FieldId.from(resolvedField)); } @Override public Void visitSubfieldExpr(SubfieldExpr node, Scope scope) { Expr child = node.getChild(0); if (!child.getType().isStructType()) { throw new SemanticException(child.toSql() + " must be a struct type, check if you are using `'`", child.getPos()); } List<String> fieldNames = node.getFieldNames(); Type tmpType = child.getType(); for (String fieldName : fieldNames) { StructType structType = (StructType) tmpType; StructField structField = structType.getField(fieldName); if (structField == null) { throw new SemanticException(String.format("Struct subfield '%s' cannot be resolved", fieldName), node.getPos()); } tmpType = structField.getType(); } node.setType(tmpType); return null; } @Override public Void visitSlot(SlotRef node, Scope scope) { ResolvedField resolvedField = scope.resolveField(node); node.setType(resolvedField.getField().getType()); node.setTblName(resolvedField.getField().getRelationAlias()); if (node.getType().isStructType()) { node.setCol(resolvedField.getField().getName()); node.setLabel(resolvedField.getField().getName()); if 
(resolvedField.getField().getTmpUsedStructFieldPos().size() > 0) { node.setUsedStructFieldPos(resolvedField.getField().getTmpUsedStructFieldPos()); node.resetStructInfo(); } } handleResolvedField(node, resolvedField); return null; } @Override public Void visitFieldReference(FieldReference node, Scope scope) { Field field = scope.getRelationFields().getFieldByIndex(node.getFieldIndex()); node.setType(field.getType()); return null; } @Override public Void visitArrayExpr(ArrayExpr node, Scope scope) { if (!node.getChildren().isEmpty()) { try { Type targetItemType; if (node.getType() != null) { targetItemType = ((ArrayType) node.getType()).getItemType(); } else { targetItemType = TypeManager.getCommonSuperType( node.getChildren().stream().map(Expr::getType).collect(Collectors.toList())); } for (int i = 0; i < node.getChildren().size(); i++) { if (!node.getChildren().get(i).getType().matchesType(targetItemType)) { node.castChild(targetItemType, i); } } node.setType(new ArrayType(targetItemType)); } catch (AnalysisException e) { throw new SemanticException(e.getMessage()); } } else { node.setType(Type.ARRAY_NULL); } return null; } @Override public Void visitMapExpr(MapExpr node, Scope scope) { if (!node.getChildren().isEmpty()) { Type keyType = Type.NULL; Type valueType = Type.NULL; if (node.getKeyExpr() != null) { keyType = node.getKeyExpr().getType(); } if (node.getValueExpr() != null) { valueType = node.getValueExpr().getType(); } node.setType(new MapType(keyType, valueType)); } else { node.setType(new MapType(Type.NULL, Type.NULL)); } return null; } @Override public Void visitCollectionElementExpr(CollectionElementExpr node, Scope scope) { Expr expr = node.getChild(0); Expr subscript = node.getChild(1); if (!expr.getType().isArrayType() && !expr.getType().isMapType()) { throw new SemanticException("cannot subscript type " + expr.getType() + " because it is not an array or a map", expr.getPos()); } if (expr.getType().isArrayType()) { if 
(!subscript.getType().isNumericType()) { throw new SemanticException("array subscript must have type integer", subscript.getPos()); } try { if (subscript.getType().getPrimitiveType() != PrimitiveType.INT) { node.castChild(Type.INT, 1); } node.setType(((ArrayType) expr.getType()).getItemType()); } catch (AnalysisException e) { throw new SemanticException(e.getMessage()); } } else { try { if (subscript.getType().getPrimitiveType() != ((MapType) expr.getType()).getKeyType().getPrimitiveType()) { node.castChild(((MapType) expr.getType()).getKeyType(), 1); } node.setType(((MapType) expr.getType()).getValueType()); } catch (AnalysisException e) { throw new SemanticException(e.getMessage()); } } return null; } @Override public Void visitArraySliceExpr(ArraySliceExpr node, Scope scope) { if (!node.getChild(0).getType().isArrayType()) { throw new SemanticException("cannot subscript type" + node.getChild(0).getType() + " because it is not an array", node.getChild(0).getPos()); } node.setType(node.getChild(0).getType()); return null; } @Override public Void visitArrowExpr(ArrowExpr node, Scope scope) { Expr item = node.getChild(0); Expr key = node.getChild(1); if (!key.isLiteral() || !key.getType().isStringType()) { throw new SemanticException("right operand of -> should be string literal, but got " + key, key.getPos()); } if (!item.getType().isJsonType()) { throw new SemanticException( "-> operator could only be used for json column, but got " + item.getType(), item.getPos()); } node.setType(Type.JSON); return null; } @Override public Void visitLambdaFunctionExpr(LambdaFunctionExpr node, Scope scope) { if (scope.getLambdaInputs().size() == 0) { throw new SemanticException( "Lambda Functions can only be used in high-order functions with arrays/maps", node.getPos()); } if (scope.getLambdaInputs().size() != node.getChildren().size() - 1) { throw new SemanticException("Lambda arguments should equal to lambda input arrays", node.getPos()); } Set<String> set = new HashSet<>(); 
List<LambdaArgument> args = Lists.newArrayList(); for (int i = 1; i < node.getChildren().size(); ++i) { args.add((LambdaArgument) node.getChild(i)); String name = ((LambdaArgument) node.getChild(i)).getName(); if (set.contains(name)) { throw new SemanticException("Lambda argument: " + name + " is duplicated", node.getChild(i).getPos()); } set.add(name); ((LambdaArgument) node.getChild(i)).setNullable(scope.getLambdaInputs().get(i - 1).isNullable()); node.getChild(i).setType(scope.getLambdaInputs().get(i - 1).getType()); } Scope lambdaScope = new Scope(args, scope); ExpressionAnalyzer.analyzeExpression(node.getChild(0), this.analyzeState, lambdaScope, this.session); node.setType(Type.FUNCTION); scope.clearLambdaInputs(); return null; } @Override public Void visitCompoundPredicate(CompoundPredicate node, Scope scope) { for (int i = 0; i < node.getChildren().size(); i++) { Type type = node.getChild(i).getType(); if (!type.isBoolean() && !type.isNull()) { String msg = String.format("Operand '%s' part of predicate " + "'%s' should return type 'BOOLEAN' but returns type '%s'", AstToStringBuilder.toString(node), AstToStringBuilder.toString(node.getChild(i)), type.toSql()); throw new SemanticException(msg, node.getChild(i).getPos()); } } node.setType(Type.BOOLEAN); return null; } @Override public Void visitBetweenPredicate(BetweenPredicate node, Scope scope) { predicateBaseAndCheck(node); List<Type> list = node.getChildren().stream().map(Expr::getType).collect(Collectors.toList()); Type compatibleType = TypeManager.getCompatibleTypeForBetweenAndIn(list); for (Type type : list) { if (!Type.canCastTo(type, compatibleType)) { throw new SemanticException( "between predicate type " + type.toSql() + " with type " + compatibleType.toSql() + " is invalid", node.getPos()); } } return null; } @Override public Void visitBinaryPredicate(BinaryPredicate node, Scope scope) { Type type1 = node.getChild(0).getType(); Type type2 = node.getChild(1).getType(); Type compatibleType = 
TypeManager.getCompatibleTypeForBinary(node.getOp().isNotRangeComparison(), type1, type2); final String ERROR_MSG = "Column type %s does not support binary predicate operation"; if (!Type.canCastTo(type1, compatibleType)) { throw new SemanticException(String.format(ERROR_MSG, type1.toSql()), node.getPos()); } if (!Type.canCastTo(type2, compatibleType)) { throw new SemanticException(String.format(ERROR_MSG, type1.toSql()), node.getPos()); } node.setType(Type.BOOLEAN); return null; } @Override public Void visitArithmeticExpr(ArithmeticExpr node, Scope scope) { if (node.getOp().getPos() == ArithmeticExpr.OperatorPosition.BINARY_INFIX) { ArithmeticExpr.Operator op = node.getOp(); Type t1 = node.getChild(0).getType().getNumResultType(); Type t2 = node.getChild(1).getType().getNumResultType(); if (t1.isDecimalV3() || t2.isDecimalV3()) { try { node.rewriteDecimalOperation(); } catch (AnalysisException ex) { throw new SemanticException(ex.getMessage()); } Type lhsType = node.getChild(0).getType(); Type rhsType = node.getChild(1).getType(); Type resultType = node.getType(); Type[] args = {lhsType, rhsType}; Function fn = Expr.getBuiltinFunction(op.getName(), args, Function.CompareMode.IS_IDENTICAL); Function newFn = new ScalarFunction(fn.getFunctionName(), args, resultType, fn.hasVarArgs()); node.setType(resultType); node.setFn(newFn); return null; } Type lhsType; Type rhsType; switch (op) { case MULTIPLY: case ADD: case SUBTRACT: lhsType = ArithmeticExpr.getBiggerType(ArithmeticExpr.getCommonType(t1, t2)); rhsType = lhsType; break; case MOD: lhsType = ArithmeticExpr.getCommonType(t1, t2); rhsType = lhsType; break; case DIVIDE: lhsType = ArithmeticExpr.getCommonType(t1, t2); if (lhsType.isFixedPointType()) { lhsType = Type.DOUBLE; } rhsType = lhsType; break; case INT_DIVIDE: case BITAND: case BITOR: case BITXOR: lhsType = ArithmeticExpr.getCommonType(t1, t2); if (!lhsType.isFixedPointType()) { lhsType = Type.BIGINT; } rhsType = lhsType; break; case BIT_SHIFT_LEFT: case 
BIT_SHIFT_RIGHT: case BIT_SHIFT_RIGHT_LOGICAL: lhsType = t1; rhsType = Type.BIGINT; break; default: throw new SemanticException("Unknown arithmetic operation " + op + " in: " + node, node.getPos()); } if (node.getChild(0).getType().equals(Type.NULL) && node.getChild(1).getType().equals(Type.NULL)) { lhsType = Type.NULL; rhsType = Type.NULL; } if (lhsType.isInvalid() || rhsType.isInvalid()) { throw new SemanticException("Any function type can not cast to " + Type.INVALID.toSql()); } if (!Type.NULL.equals(node.getChild(0).getType()) && !Type.canCastTo(t1, lhsType)) { throw new SemanticException( "cast type " + node.getChild(0).getType().toSql() + " with type " + lhsType.toSql() + " is invalid", node.getPos()); } if (!Type.NULL.equals(node.getChild(1).getType()) && !Type.canCastTo(t2, rhsType)) { throw new SemanticException( "cast type " + node.getChild(1).getType().toSql() + " with type " + rhsType.toSql() + " is invalid", node.getPos()); } Function fn = Expr.getBuiltinFunction(op.getName(), new Type[] {lhsType, rhsType}, Function.CompareMode.IS_SUPERTYPE_OF); if (fn == null) { throw new SemanticException(String.format( "No matching function '%s' with operand types %s and %s", node.getOp().getName(), t1, t2)); } /* * commonType is the common type of the parameters of the function, * and fn.getReturnType() is the return type of the function after execution * So we use fn.getReturnType() as node type */ node.setType(fn.getReturnType()); node.setFn(fn); } else if (node.getOp().getPos() == ArithmeticExpr.OperatorPosition.UNARY_PREFIX) { Function fn = Expr.getBuiltinFunction( node.getOp().getName(), new Type[] {Type.BIGINT}, Function.CompareMode.IS_SUPERTYPE_OF); node.setType(Type.BIGINT); node.setFn(fn); } else if (node.getOp().getPos() == ArithmeticExpr.OperatorPosition.UNARY_POSTFIX) { throw new SemanticException("not yet implemented: expression analyzer for " + node.getClass().getName(), node.getPos()); } else { throw new SemanticException("not yet implemented: 
expression analyzer for " + node.getClass().getName(), node.getPos()); } return null; } @Override public Void visitTimestampArithmeticExpr(TimestampArithmeticExpr node, Scope scope) { node.setChild(0, TypeManager.addCastExpr(node.getChild(0), Type.DATETIME)); String funcOpName; if (node.getFuncName() != null) { if (ADD_DATE_FUNCTIONS.contains(node.getFuncName())) { funcOpName = String.format("%sS_%s", node.getTimeUnitIdent(), "add"); } else if (SUB_DATE_FUNCTIONS.contains(node.getFuncName())) { funcOpName = String.format("%sS_%s", node.getTimeUnitIdent(), "sub"); } else { node.setChild(1, TypeManager.addCastExpr(node.getChild(1), Type.DATETIME)); funcOpName = String.format("%sS_%s", node.getTimeUnitIdent(), "diff"); } } else { funcOpName = String.format("%sS_%s", node.getTimeUnitIdent(), (node.getOp() == ArithmeticExpr.Operator.ADD) ? "add" : "sub"); } Type[] argumentTypes = node.getChildren().stream().map(Expr::getType) .toArray(Type[]::new); Function fn = Expr.getBuiltinFunction(funcOpName.toLowerCase(), argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF); if (fn == null) { String msg = String.format("No matching function with signature: %s(%s)", funcOpName, Joiner.on(", ") .join(Arrays.stream(argumentTypes).map(Type::toSql).collect(Collectors.toList()))); throw new SemanticException(msg, node.getPos()); } node.setType(fn.getReturnType()); node.setFn(fn); return null; } @Override public Void visitExistsPredicate(ExistsPredicate node, Scope scope) { predicateBaseAndCheck(node); return null; } @Override public Void visitInPredicate(InPredicate node, Scope scope) { predicateBaseAndCheck(node); List<Expr> queryExpressions = Lists.newArrayList(); node.collect(arg -> arg instanceof Subquery, queryExpressions); if (queryExpressions.size() > 0 && node.getChildren().size() > 2) { throw new SemanticException("In Predicate only support literal expression list", node.getPos()); } List<Type> list = 
node.getChildren().stream().map(Expr::getType).collect(Collectors.toList()); Type compatibleType = TypeManager.getCompatibleTypeForBetweenAndIn(list); for (Expr child : node.getChildren()) { Type type = child.getType(); if (type.isJsonType()) { throw new SemanticException("InPredicate of JSON is not supported", child.getPos()); } if (!Type.canCastTo(type, compatibleType)) { throw new SemanticException( "in predicate type " + type.toSql() + " with type " + compatibleType.toSql() + " is invalid", child.getPos()); } } return null; } @Override public Void visitMultiInPredicate(MultiInPredicate node, Scope scope) { predicateBaseAndCheck(node); List<Type> leftTypes = node.getChildren().stream().limit(node.getNumberOfColumns()).map(Expr::getType) .collect(Collectors.toList()); Subquery inSubquery = (Subquery) node.getChild(node.getNumberOfColumns()); List<Type> rightTypes = inSubquery.getQueryStatement().getQueryRelation().getOutputExpression().stream().map(Expr::getType). collect(Collectors.toList()); if (leftTypes.size() != rightTypes.size()) { throw new SemanticException( "subquery must return the same number of columns as provided by the IN predicate", node.getPos()); } for (int i = 0; i < rightTypes.size(); ++i) { if (leftTypes.get(i).isJsonType() || rightTypes.get(i).isJsonType() || leftTypes.get(i).isMapType() || rightTypes.get(i).isMapType() || leftTypes.get(i).isStructType() || rightTypes.get(i).isStructType()) { throw new SemanticException("InPredicate of JSON, Map, Struct types is not supported"); } if (!Type.canCastTo(leftTypes.get(i), rightTypes.get(i))) { throw new SemanticException( "in predicate type " + leftTypes.get(i).toSql() + " with type " + rightTypes.get(i).toSql() + " is invalid"); } } return null; } @Override public Void visitLiteral(LiteralExpr node, Scope scope) { if (node instanceof LargeIntLiteral) { BigInteger value = ((LargeIntLiteral) node).getValue(); if (value.compareTo(LargeIntLiteral.LARGE_INT_MIN) < 0 || 
value.compareTo(LargeIntLiteral.LARGE_INT_MAX) > 0) { throw new SemanticException(PARSER_ERROR_MSG.numOverflow(value.toString()), node.getPos()); } } return null; } @Override public Void visitIsNullPredicate(IsNullPredicate node, Scope scope) { predicateBaseAndCheck(node); return null; } @Override public Void visitLikePredicate(LikePredicate node, Scope scope) { predicateBaseAndCheck(node); Type type1 = node.getChild(0).getType(); Type type2 = node.getChild(1).getType(); if (!type1.isStringType() && !type1.isNull()) { throw new SemanticException( "left operand of " + node.getOp().toString() + " must be of type STRING: " + AstToStringBuilder.toString(node), node.getPos()); } if (!type2.isStringType() && !type2.isNull()) { throw new SemanticException( "right operand of " + node.getOp().toString() + " must be of type STRING: " + AstToStringBuilder.toString(node), node.getPos()); } if (LikePredicate.Operator.REGEXP.equals(node.getOp()) && !type2.isNull() && node.getChild(1).isLiteral()) { try { Pattern.compile(((StringLiteral) node.getChild(1)).getValue()); } catch (PatternSyntaxException e) { throw new SemanticException( "Invalid regular expression in '" + AstToStringBuilder.toString(node) + "'", node.getPos()); } } return null; } private void predicateBaseAndCheck(Predicate node) { node.setType(Type.BOOLEAN); for (Expr expr : node.getChildren()) { if (expr.getType().isOnlyMetricType() || (expr.getType().isComplexType() && !(node instanceof IsNullPredicate))) { throw new SemanticException( "HLL, BITMAP, PERCENTILE and ARRAY, MAP, STRUCT type couldn't as Predicate", node.getPos()); } } } @Override public Void visitCastExpr(CastExpr cast, Scope context) { Type castType; if (cast.isImplicit()) { castType = cast.getType(); } else { castType = cast.getTargetTypeDef().getType(); } if (!Type.canCastTo(cast.getChild(0).getType(), castType)) { throw new SemanticException("Invalid type cast from " + cast.getChild(0).getType().toSql() + " to " + castType.toSql() + " in sql `" + 
AstToStringBuilder.toString(cast.getChild(0)).replace("%", "%%") + "`", cast.getPos()); } cast.setType(castType); return null; } @Override private void checkFunction(String fnName, FunctionCallExpr node) { switch (fnName) { case FunctionSet.TIME_SLICE: case FunctionSet.DATE_SLICE: if (!(node.getChild(1) instanceof IntLiteral)) { throw new SemanticException( fnName + " requires second parameter must be a constant interval", node.getPos()); } if (((IntLiteral) node.getChild(1)).getValue() <= 0) { throw new SemanticException( fnName + " requires second parameter must be greater than 0", node.getPos()); } break; case FunctionSet.ARRAY_FILTER: if (node.getChildren().size() != 2) { throw new SemanticException(fnName + " should have 2 array inputs or lambda functions", node.getPos()); } if (!node.getChild(0).getType().isArrayType() && !node.getChild(0).getType().isNull()) { throw new SemanticException("The first input of " + fnName + " should be an array or a lambda function", node.getPos()); } if (!node.getChild(1).getType().isArrayType() && !node.getChild(1).getType().isNull()) { throw new SemanticException("The second input of " + fnName + " should be an array or a lambda function", node.getPos()); } if (!Type.canCastTo(node.getChild(1).getType(), Type.ARRAY_BOOLEAN)) { throw new SemanticException("The second input of array_filter " + node.getChild(1).getType().toString() + " can't cast to ARRAY<BOOL>", node.getPos()); } break; case FunctionSet.ARRAY_SORTBY: if (node.getChildren().size() != 2) { throw new SemanticException(fnName + " should have 2 array inputs or lambda functions", node.getPos()); } if (!node.getChild(0).getType().isArrayType() && !node.getChild(0).getType().isNull()) { throw new SemanticException("The first input of " + fnName + " should be an array or a lambda function", node.getPos()); } if (!node.getChild(1).getType().isArrayType() && !node.getChild(1).getType().isNull()) { throw new SemanticException("The second input of " + fnName + " should be 
an array or a lambda function", node.getPos()); } break; case FunctionSet.ARRAY_CONCAT: if (node.getChildren().size() < 2) { throw new SemanticException(fnName + " should have at least two inputs", node.getPos()); } break; case FunctionSet.ARRAY_GENERATE: if (node.getChildren().size() < 1 || node.getChildren().size() > 3) { throw new SemanticException(fnName + " has wrong input numbers"); } for (Expr expr : node.getChildren()) { if ((expr instanceof SlotRef) && node.getChildren().size() != 3) { throw new SemanticException(fnName + " with IntColumn doesn't support default parameters"); } if (!(expr instanceof IntLiteral) && !(expr instanceof LargeIntLiteral) && !(expr instanceof SlotRef) && !(expr instanceof NullLiteral)) { throw new SemanticException(fnName + "'s parameter only support Integer"); } } break; case FunctionSet.MAP_FILTER: if (node.getChildren().size() != 2) { throw new SemanticException(fnName + " should have 2 inputs, " + "but there are just " + node.getChildren().size() + " inputs."); } if (!node.getChild(0).getType().isMapType() && !node.getChild(0).getType().isNull()) { throw new SemanticException("The first input of " + fnName + " should be a map or a lambda function."); } if (!node.getChild(1).getType().isArrayType() && !node.getChild(1).getType().isNull()) { throw new SemanticException("The second input of " + fnName + " should be a array or a lambda function."); } if (!Type.canCastTo(node.getChild(1).getType(), Type.ARRAY_BOOLEAN)) { throw new SemanticException("The second input of map_filter " + node.getChild(1).getType().toString() + " can't cast to ARRAY<BOOL>"); } break; } } private Function getStrToDateFunction(FunctionCallExpr node, Type[] argumentTypes) { /* * @TODO: Determine the return type of this function * If is format is constant and don't contains time part, return date type, to compatible with mysql. * In fact we don't want to support str_to_date return date like mysql, reason: * 1. 
             * The return type of the FE/BE str_to_date function signature is datetime; returning date
             * would make the types differ and throw unpredictable errors.
             * 2. Supporting both date and datetime return types in one function is complicated.
             * 3. The meaning of the function is confusing. In MySQL it returns date if the format is a
             * constant string that does not contain a "%H/%M/%S" pattern, but that is tricky logic: if the
             * format is a variable expression, like str_to_date(col1, col2) where col2 is '%Y%m%d', the
             * result is always datetime.
             */
            Function fn = Expr.getBuiltinFunction(node.getFnName().getFunction(), argumentTypes,
                    Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
            if (fn == null) {
                return null;
            }
            // Non-constant format: keep the datetime-returning builtin (see comment above).
            if (!node.getChild(1).isConstant()) {
                return fn;
            }
            ExpressionMapping expressionMapping =
                    new ExpressionMapping(new Scope(RelationId.anonymous(), new RelationFields()),
                            com.google.common.collect.Lists.newArrayList());
            ScalarOperator format = SqlToScalarOperatorTranslator.translate(node.getChild(1), expressionMapping,
                    new ColumnRefFactory());
            // Constant format with no time part: switch to the date-returning "str2date" builtin.
            if (format.isConstantRef() && !HAS_TIME_PART.matcher(format.toString()).matches()) {
                return Expr.getBuiltinFunction("str2date", argumentTypes,
                        Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
            }
            return fn;
        }

        /**
         * Resolves the builtin for array_generate, normalizing the child list first:
         * a single argument is treated as the stop value (start defaults to 1), and a
         * missing step defaults to 1 or -1 depending on the start/stop ordering.
         */
        private Function getArrayGenerateFunction(FunctionCallExpr node) {
            // One argument: it is the stop value; insert the default start of 1 before it.
            if (node.getChildren().size() == 1) {
                LiteralExpr secondParam = (LiteralExpr) node.getChild(0);
                node.clearChildren();
                node.addChild(new IntLiteral(1));
                node.addChild(secondParam);
            }
            // Two arguments (start, stop): append a default step of +1 for ascending
            // ranges and -1 for descending; a NULL bound also gets step +1.
            if (node.getChildren().size() == 2) {
                int idx = 0;
                BigInteger[] childValues = new BigInteger[2];
                Boolean hasNUll = false;
                for (Expr expr : node.getChildren()) {
                    if (expr instanceof NullLiteral) {
                        hasNUll = true;
                    } else if (expr instanceof IntLiteral) {
                        childValues[idx++] = BigInteger.valueOf(((IntLiteral) expr).getValue());
                    } else {
                        // Remaining case per the callers' validation: LargeIntLiteral.
                        childValues[idx++] = ((LargeIntLiteral) expr).getValue();
                    }
                }
                if (hasNUll || childValues[0].compareTo(childValues[1]) < 0) {
                    node.addChild(new IntLiteral(1));
                } else {
                    node.addChild(new IntLiteral(-1));
                }
            }
            Type[] argumentTypes = node.getChildren().stream().map(Expr::getType).toArray(Type[]::new);
            return Expr.getBuiltinFunction(FunctionSet.ARRAY_GENERATE, argumentTypes,
                    Function.CompareMode.IS_SUPERTYPE_OF);
        }

        /**
         * Analyzes GROUPING()/GROUPING_ID(): requires at least one argument, all of
         * which must be plain column references; the builtin is looked up with a
         * single BIGINT parameter signature.
         */
        @Override
        public Void visitGroupingFunctionCall(GroupingFunctionCallExpr node, Scope scope) {
            if (node.getChildren().size() < 1) {
                throw new SemanticException("GROUPING functions required at least one parameters", node.getPos());
            }
            if (node.getChildren().stream().anyMatch(e -> !(e instanceof SlotRef))) {
                throw new SemanticException("grouping functions only support column", node.getPos());
            }

            Type[] childTypes = new Type[1];
            childTypes[0] = Type.BIGINT;
            Function fn = Expr.getBuiltinFunction(node.getFnName().getFunction(),
                    childTypes, Function.CompareMode.IS_IDENTICAL);

            node.setFn(fn);
            node.setType(fn.getReturnType());
            return null;
        }

        /**
         * Type-checks a CASE expression: all WHEN operands must be castable to a
         * common type with the CASE operand (if any), all THEN/ELSE results must be
         * castable to a common result type, which becomes the node's type.
         */
        @Override
        public Void visitCaseWhenExpr(CaseExpr node, Scope context) {
            // start/end delimit the WHEN/THEN pairs inside the child list.
            int start = 0;
            int end = node.getChildren().size();

            Expr caseExpr = null;
            Expr elseExpr = null;

            if (node.hasCaseExpr()) {
                caseExpr = node.getChild(0);
                start++;
            }

            if (node.hasElseExpr()) {
                elseExpr = node.getChild(end - 1);
                end--;
            }

            if (node.getChildren().stream().anyMatch(d -> !d.getType().isScalarType())) {
                throw new SemanticException("case-when only support scalar type", node.getPos());
            }

            // Collect the CASE operand plus every WHEN condition (children at even offsets).
            List<Type> whenTypes = Lists.newArrayList();

            if (null != caseExpr) {
                whenTypes.add(caseExpr.getType());
            }

            for (int i = start; i < end; i = i + 2) {
                whenTypes.add(node.getChild(i).getType());
            }

            Type compatibleType = Type.NULL;
            if (null != caseExpr) {
                compatibleType = TypeManager.getCompatibleTypeForCaseWhen(whenTypes);
            }

            for (Type type : whenTypes) {
                if (!Type.canCastTo(type, compatibleType)) {
                    throw new SemanticException("Invalid when type cast " + type.toSql()
                            + " to " + compatibleType.toSql(), node.getPos());
                }
            }

            // Collect every THEN result (children at odd offsets) plus the ELSE result.
            List<Type> thenTypes = Lists.newArrayList();

            for (int i = start + 1; i < end; i = i + 2) {
                thenTypes.add(node.getChild(i).getType());
            }

            if (null != elseExpr) {
                thenTypes.add(elseExpr.getType());
            }

            // All-NULL results fall back to BOOLEAN rather than a NULL-typed node.
            Type returnType = thenTypes.stream().allMatch(Type.NULL::equals) ? Type.BOOLEAN :
                    TypeManager.getCompatibleTypeForCaseWhen(thenTypes);
            for (Type type : thenTypes) {
                if (!Type.canCastTo(type, returnType)) {
                    throw new SemanticException("Invalid then type cast " + type.toSql()
                            + " to " + returnType.toSql(), node.getPos());
                }
            }

            node.setType(returnType);
            return null;
        }

        /**
         * Analyzes a scalar subquery and takes the type of its first output field.
         */
        @Override
        public Void visitSubquery(Subquery node, Scope context) {
            QueryAnalyzer queryAnalyzer = new QueryAnalyzer(session);
            queryAnalyzer.analyze(node.getQueryStatement(), context);
            node.setType(node.getQueryStatement().getQueryRelation().getRelationFields().getFieldByIndex(0).getType());
            return null;
        }

        /**
         * Analyzes a window function call: visits the inner function call, the
         * window-frame boundary expressions (if present), partition-by and
         * order-by expressions, then runs the analytic-specific verification.
         */
        @Override
        public Void visitAnalyticExpr(AnalyticExpr node, Scope context) {
            visit(node.getFnCall(), context);
            node.setType(node.getFnCall().getType());
            if (node.getWindow() != null) {
                if (node.getWindow().getLeftBoundary() != null &&
                        node.getWindow().getLeftBoundary().getExpr() != null) {
                    visit(node.getWindow().getLeftBoundary().getExpr(), context);
                }
                if (node.getWindow().getRightBoundary() != null &&
                        node.getWindow().getRightBoundary().getExpr() != null) {
                    visit(node.getWindow().getRightBoundary().getExpr(), context);
                }
            }
            node.getPartitionExprs().forEach(e -> visit(e, context));
            node.getOrderByElements().stream().map(OrderByElement::getExpr).forEach(e -> visit(e, context));
            verifyAnalyticExpression(node);
            return null;
        }

        /**
         * Evaluates session-information functions (DATABASE/SCHEMA, USER,
         * CURRENT_USER, CURRENT_ROLE, CONNECTION_ID, CURRENT_CATALOG) eagerly
         * from the connect context and stores the literal result on the node.
         */
        @Override
        public Void visitInformationFunction(InformationFunction node, Scope context) {
            String funcType = node.getFuncType();
            if (funcType.equalsIgnoreCase("DATABASE") || funcType.equalsIgnoreCase("SCHEMA")) {
                node.setType(Type.VARCHAR);
                node.setStrValue(ClusterNamespace.getNameFromFullName(session.getDatabase()));
            } else if (funcType.equalsIgnoreCase("USER")) {
                node.setType(Type.VARCHAR);

                String user = session.getQualifiedUser();
                String remoteIP = session.getRemoteIP();
                node.setStrValue(new UserIdentity(user, remoteIP).toString());
            } else if (funcType.equalsIgnoreCase("CURRENT_USER")) {
                node.setType(Type.VARCHAR);
                node.setStrValue(session.getCurrentUserIdentity().toString());
            } else if (funcType.equalsIgnoreCase("CURRENT_ROLE")) {
                node.setType(Type.VARCHAR);
                AuthorizationManager manager = session.getGlobalStateMgr().getAuthorizationManager();
                List<String> roleName = new ArrayList<>();

                try {
                    for (Long roleId : session.getCurrentRoleIds()) {
                        RolePrivilegeCollection rolePrivilegeCollection =
                                manager.getRolePrivilegeCollectionUnlocked(roleId, false);
                        if (rolePrivilegeCollection != null) {
                            roleName.add(rolePrivilegeCollection.getName());
                        }
                    }
                } catch (PrivilegeException e) {
                    throw new SemanticException(e.getMessage());
                }

                if (roleName.isEmpty()) {
                    node.setStrValue("NONE");
                } else {
                    node.setStrValue(Joiner.on(", ").join(roleName));
                }
            } else if (funcType.equalsIgnoreCase("CONNECTION_ID")) {
                node.setType(Type.BIGINT);
                node.setIntValue(session.getConnectionId());
                node.setStrValue("");
            } else if (funcType.equalsIgnoreCase("CURRENT_CATALOG")) {
                node.setType(Type.VARCHAR);
                node.setStrValue(session.getCurrentCatalog().toString());
            }
            return null;
        }

        /**
         * Resolves a session/user variable reference. An undefined user variable
         * becomes a NULL STRING; otherwise the node takes the evaluated
         * expression's type and value. Session variables are filled from
         * VariableMgr, with sql_mode decoded to its textual form.
         */
        @Override
        public Void visitVariableExpr(VariableExpr node, Scope context) {
            try {
                if (node.getSetType().equals(SetType.USER)) {
                    UserVariable userVariable = session.getUserVariables(node.getName());
                    // Undefined user variables are NULL of type STRING.
                    if (userVariable == null) {
                        node.setType(Type.STRING);
                        node.setIsNull();
                        return null;
                    }
                    Type variableType = userVariable.getEvaluatedExpression().getType();
                    node.setType(variableType);
                    if (userVariable.getEvaluatedExpression() instanceof NullLiteral) {
                        node.setIsNull();
                    } else {
                        node.setValue(userVariable.getEvaluatedExpression().getRealObjectValue());
                    }
                } else {
                    VariableMgr.fillValue(session.getSessionVariable(), node);
                    // sql_mode is stored as a bitmask; present it as its decoded string form.
                    if (!Strings.isNullOrEmpty(node.getName()) &&
                            node.getName().equalsIgnoreCase(SessionVariable.SQL_MODE)) {
                        node.setType(Type.VARCHAR);
                        node.setValue(SqlModeHelper.decode((long) node.getValue()));
                    }
                }
            } catch (AnalysisException | DdlException e) {
                throw new SemanticException(e.getMessage());
            }
            return null;
        }

        /** DEFAULT value placeholders are typed as VARCHAR. */
        @Override
        public Void visitDefaultValueExpr(DefaultValueExpr node, Scope context) {
            node.setType(Type.VARCHAR);
            return null;
        }

        /** Clone expressions need no analysis of their own. */
        @Override
        public Void visitCloneExpr(CloneExpr node, Scope context) {
            return null;
        }
    }
// TODO(review): placeholder note ("I will fix it") — replace with a concrete description of the pending fix.
        /**
         * Resolves a function call: dispatches on the function name to pick the
         * correct builtin lookup/validation path (count distinct, exchange stats,
         * time/date slice, decimal-v3, TIME arguments, str_to_date, array
         * functions, array_generate), falls back to UDF lookup, and finally
         * validates argument castability before attaching the resolved function
         * and return type to the node.
         */
        public Void visitFunctionCall(FunctionCallExpr node, Scope scope) {
            Type[] argumentTypes = node.getChildren().stream().map(Expr::getType).toArray(Type[]::new);

            // Nondeterministic builtins get a unique id so repeated calls are not merged.
            if (node.isNondeterministicBuiltinFnName()) {
                ExprId exprId = analyzeState.getNextNondeterministicId();
                node.setNondeterministicId(exprId);
            }

            Function fn;
            String fnName = node.getFnName().getFunction();
            if (fnName.equals(FunctionSet.COUNT) && node.getParams().isDistinct()) {
                // count(distinct ...) resolves against the first argument only.
                fn = Expr.getBuiltinFunction(FunctionSet.COUNT, new Type[] {argumentTypes[0]},
                        Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
            } else if (fnName.equals(FunctionSet.EXCHANGE_BYTES) || fnName.equals(FunctionSet.EXCHANGE_SPEED)) {
                fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
                fn.setArgsType(argumentTypes); // as accepting various types
                fn.setIsNullable(false);
            } else if (fnName.equals(FunctionSet.TIME_SLICE) || fnName.equals(FunctionSet.DATE_SLICE)) {
                // The interval must be a positive integer literal.
                if (!(node.getChild(1) instanceof IntLiteral)) {
                    throw new SemanticException(
                            fnName + " requires second parameter must be a constant interval");
                }
                if (((IntLiteral) node.getChild(1)).getValue() <= 0) {
                    throw new SemanticException(
                            fnName + " requires second parameter must be greater than 0");
                }
                fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
            } else if (FunctionSet.decimalRoundFunctions.contains(fnName) ||
                    Arrays.stream(argumentTypes).anyMatch(Type::isDecimalV3)) {
                // Variance-family functions compute in DOUBLE regardless of decimal inputs.
                if (FunctionSet.varianceFunctions.contains(fnName)) {
                    Type[] doubleArgTypes = Stream.of(argumentTypes).map(t -> Type.DOUBLE).toArray(Type[]::new);
                    fn = Expr.getBuiltinFunction(fnName, doubleArgTypes,
                            Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
                } else {
                    fn = getDecimalV3Function(node, argumentTypes);
                }
            } else if (Arrays.stream(argumentTypes).anyMatch(arg -> arg.matchesType(Type.TIME))) {
                fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
                // Aggregates over TIME are not supported.
                if (fn instanceof AggregateFunction) {
                    throw new SemanticException("Time Type can not used in %s function", fnName);
                }
            } else if (FunctionSet.STR_TO_DATE.equals(fnName)) {
                // May resolve to str2date when the format is a constant with no time part.
                fn = getStrToDateFunction(node, argumentTypes);
            } else if (fnName.equals(FunctionSet.ARRAY_FILTER)) {
                if (node.getChildren().size() != 2) {
                    throw new SemanticException(fnName + " should have 2 array inputs or lambda functions.");
                }
                if (!node.getChild(0).getType().isArrayType() && !node.getChild(0).getType().isNull()) {
                    throw new SemanticException("The first input of " + fnName +
                            " should be an array or a lambda function.");
                }
                if (!node.getChild(1).getType().isArrayType() && !node.getChild(1).getType().isNull()) {
                    throw new SemanticException("The second input of " + fnName +
                            " should be an array or a lambda function.");
                }
                // The filter input is coerced to ARRAY<BOOLEAN> before lookup.
                if (!Type.canCastTo(node.getChild(1).getType(), Type.ARRAY_BOOLEAN)) {
                    throw new SemanticException("The second input of array_filter " +
                            node.getChild(1).getType().toString() + "  can't cast to ARRAY<BOOL>");
                }
                node.setChild(1, new CastExpr(Type.ARRAY_BOOLEAN, node.getChild(1)));
                argumentTypes[1] = Type.ARRAY_BOOLEAN;
                fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
            } else if (fnName.equals(FunctionSet.ARRAY_SORTBY)) {
                if (node.getChildren().size() != 2) {
                    throw new SemanticException(fnName + " should have 2 array inputs or lambda functions.");
                }
                if (!node.getChild(0).getType().isArrayType() && !node.getChild(0).getType().isNull()) {
                    throw new SemanticException("The first input of " + fnName +
                            " should be an array or a lambda function.");
                }
                if (!node.getChild(1).getType().isArrayType() && !node.getChild(1).getType().isNull()) {
                    throw new SemanticException("The second input of " + fnName +
                            " should be an array or a lambda function.");
                }
                fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
            } else if (fnName.equals(FunctionSet.ARRAY_SLICE)) {
                // Offset/length parameters are normalized to BIGINT.
                for (int i = 1; i < argumentTypes.length; i++) {
                    argumentTypes[i] = Type.BIGINT;
                }
                fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_SUPERTYPE_OF);
            } else if (fnName.equals(FunctionSet.ARRAY_CONCAT)) {
                if (node.getChildren().size() < 2) {
                    throw new SemanticException(fnName + " should have at least two inputs");
                }
                fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
            } else if (fnName.equals("array_generate")) {
                if (node.getChildren().size() < 1 || node.getChildren().size() > 3) {
                    throw new SemanticException(fnName + " has wrong input numbers");
                }
                for (Expr expr : node.getChildren()) {
                    // Column inputs require all three parameters to be given explicitly.
                    if ((expr instanceof SlotRef) && node.getChildren().size() != 3) {
                        throw new SemanticException(fnName + " with IntColumn doesn't support default parameters");
                    }
                    if (!(expr instanceof IntLiteral) && !(expr instanceof LargeIntLiteral) &&
                            !(expr instanceof SlotRef) && !(expr instanceof NullLiteral)) {
                        throw new SemanticException(fnName + "'s parameter only support Integer");
                    }
                }
                // One argument: treat it as the stop value, defaulting start to 1.
                if (node.getChildren().size() == 1) {
                    LiteralExpr secondParam = (LiteralExpr) node.getChild(0);
                    node.clearChildren();
                    try {
                        node.addChild(new IntLiteral("1", Type.TINYINT));
                        node.addChild(secondParam);
                    } catch (AnalysisException e) {
                        throw new SemanticException(e.getMessage());
                    }
                }
                // Two arguments: append a default step of +1 (ascending) or -1 (descending).
                if (node.getChildren().size() == 2) {
                    int idx = 0;
                    BigInteger[] childValues = new BigInteger[2];
                    for (Expr expr : node.getChildren()) {
                        if (expr instanceof NullLiteral) {
                            throw new SemanticException(fnName + "'s parameter only support Integer");
                        } else if (expr instanceof IntLiteral) {
                            childValues[idx++] = BigInteger.valueOf(((IntLiteral) expr).getValue());
                        } else {
                            childValues[idx++] = ((LargeIntLiteral) expr).getValue();
                        }
                    }
                    if (childValues[0].compareTo(childValues[1]) < 0) {
                        node.addChild(new IntLiteral(1));
                    } else {
                        node.addChild(new IntLiteral(-1));
                    }
                }
                // Children may have changed above, so recompute the argument types.
                argumentTypes = node.getChildren().stream().map(Expr::getType).toArray(Type[]::new);
                fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_SUPERTYPE_OF);
            } else {
                fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
            }

            // Fall back to user-defined functions when no builtin matched.
            if (fn == null) {
                fn = AnalyzerUtils.getUdfFunction(session, node.getFnName(), argumentTypes);
            }

            if (fn == null) {
                throw new SemanticException("No matching function with signature: %s(%s).", fnName,
                        node.getParams().isStar() ? "*" : Joiner.on(", ")
                                .join(Arrays.stream(argumentTypes).map(Type::toSql).collect(Collectors.toList())));
            }

            if (fn instanceof TableFunction) {
                throw unsupportedException("Table function cannot be used in expression");
            }

            // Every argument must match or be castable to the declared parameter type.
            for (int i = 0; i < fn.getNumArgs(); i++) {
                if (!argumentTypes[i].matchesType(fn.getArgs()[i]) &&
                        !Type.canCastToAsFunctionParameter(argumentTypes[i], fn.getArgs()[i])) {
                    throw new SemanticException("No matching function with signature: %s(%s).", fnName,
                            node.getParams().isStar() ? "*" : Joiner.on(", ")
                                    .join(Arrays.stream(argumentTypes).map(Type::toSql).collect(Collectors.toList())));
                }
            }

            node.setFn(fn);
            node.setType(fn.getReturnType());
            FunctionAnalyzer.analyze(node);
            return null;
        }
// NOTE(review): orphaned fragment `if (expr instanceof NullLiteral) {` — residue of an earlier edit; verify no surrounding method is truncated, then remove.
        /**
         * Resolves a function call: validates the call via checkFunction, picks the
         * builtin through name-specific paths (count distinct, exchange stats,
         * decimal-v3 via DecimalV3FunctionAnalyzer, TIME arguments, str_to_date,
         * array_generate), falls back to UDF lookup, then validates fixed and
         * variadic argument castability before attaching the resolved function and
         * return type to the node.
         */
        public Void visitFunctionCall(FunctionCallExpr node, Scope scope) {
            Type[] argumentTypes = node.getChildren().stream().map(Expr::getType).toArray(Type[]::new);

            // Nondeterministic builtins get a unique id so repeated calls are not merged.
            if (node.isNondeterministicBuiltinFnName()) {
                ExprId exprId = analyzeState.getNextNondeterministicId();
                node.setNondeterministicId(exprId);
            }

            Function fn;
            String fnName = node.getFnName().getFunction();

            // Name-specific validation (throws SemanticException on violation).
            checkFunction(fnName, node);

            if (fnName.equals(FunctionSet.COUNT) && node.getParams().isDistinct()) {
                // count(distinct ...) resolves against the first argument only.
                fn = Expr.getBuiltinFunction(FunctionSet.COUNT, new Type[] {argumentTypes[0]},
                        Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
            } else if (fnName.equals(FunctionSet.EXCHANGE_BYTES) || fnName.equals(FunctionSet.EXCHANGE_SPEED)) {
                fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
                fn.setArgsType(argumentTypes); // as accepting various types
                fn.setIsNullable(false);
            } else if (DecimalV3FunctionAnalyzer.argumentTypeContainDecimalV3(fnName, argumentTypes)) {
                // Decimal-v3 signatures need precision/scale-aware resolution.
                fn = DecimalV3FunctionAnalyzer.getDecimalV3Function(session, node, argumentTypes);
            } else if (Arrays.stream(argumentTypes).anyMatch(arg -> arg.matchesType(Type.TIME))) {
                fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
                // Aggregates over TIME are not supported.
                if (fn instanceof AggregateFunction) {
                    throw new SemanticException("Time Type can not used in" + fnName + " function", node.getPos());
                }
            } else if (FunctionSet.STR_TO_DATE.equals(fnName)) {
                // May resolve to str2date when the format is a constant with no time part.
                fn = getStrToDateFunction(node, argumentTypes);
            } else if (FunctionSet.ARRAY_GENERATE.equals(fnName)) {
                fn = getArrayGenerateFunction(node);
                // getArrayGenerateFunction may rewrite the children, so recompute the types.
                argumentTypes = node.getChildren().stream().map(Expr::getType).toArray(Type[]::new);
            } else {
                fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
            }

            // Fall back to user-defined functions when no builtin matched.
            if (fn == null) {
                fn = AnalyzerUtils.getUdfFunction(session, node.getFnName(), argumentTypes);
            }

            if (fn == null) {
                String msg = String.format("No matching function with signature: %s(%s)", fnName,
                        node.getParams().isStar() ? "*" : Joiner.on(", ")
                                .join(Arrays.stream(argumentTypes).map(Type::toSql).collect(Collectors.toList())));
                throw new SemanticException(msg, node.getPos());
            }

            if (fn instanceof TableFunction) {
                throw new SemanticException("Table function cannot be used in expression", node.getPos());
            }

            // Every fixed argument must match or be castable to the declared parameter type.
            for (int i = 0; i < fn.getNumArgs(); i++) {
                if (!argumentTypes[i].matchesType(fn.getArgs()[i]) &&
                        !Type.canCastToAsFunctionParameter(argumentTypes[i], fn.getArgs()[i])) {
                    String msg = String.format("No matching function with signature: %s(%s)", fnName,
                            node.getParams().isStar() ? "*" :
                                    Arrays.stream(argumentTypes).map(Type::toSql).collect(Collectors.joining(", ")));
                    throw new SemanticException(msg, node.getPos());
                }
            }

            // Extra variadic arguments are validated against the last declared parameter type.
            if (fn.hasVarArgs()) {
                Type varType = fn.getArgs()[fn.getNumArgs() - 1];
                for (int i = fn.getNumArgs(); i < argumentTypes.length; i++) {
                    if (!argumentTypes[i].matchesType(varType) &&
                            !Type.canCastToAsFunctionParameter(argumentTypes[i], varType)) {
                        String msg = String.format("Variadic function %s(%s) can't support type: %s", fnName,
                                Arrays.stream(fn.getArgs()).map(Type::toSql).collect(Collectors.joining(", ")),
                                argumentTypes[i]);
                        throw new SemanticException(msg, node.getPos());
                    }
                }
            }

            node.setFn(fn);
            node.setType(fn.getReturnType());
            FunctionAnalyzer.analyze(node);
            return null;
        }
class Visitor extends AstVisitor<Void, Scope> { private static final List<String> ADD_DATE_FUNCTIONS = Lists.newArrayList(FunctionSet.DATE_ADD, FunctionSet.ADDDATE, FunctionSet.DAYS_ADD, FunctionSet.TIMESTAMPADD); private static final List<String> SUB_DATE_FUNCTIONS = Lists.newArrayList(FunctionSet.DATE_SUB, FunctionSet.SUBDATE, FunctionSet.DAYS_SUB); private final AnalyzeState analyzeState; private final ConnectContext session; public Visitor(AnalyzeState analyzeState, ConnectContext session) { this.analyzeState = analyzeState; this.session = session; } @Override public Void visitExpression(Expr node, Scope scope) { throw unsupportedException("not yet implemented: expression analyzer for " + node.getClass().getName()); } private void handleResolvedField(SlotRef slot, ResolvedField resolvedField) { analyzeState.addColumnReference(slot, FieldId.from(resolvedField)); } @Override public Void visitSubfieldExpr(SubfieldExpr node, Scope scope) { Expr child = node.getChild(0); Preconditions.checkArgument(child.getType().isStructType(), String.format("%s must be a struct type, check if you are using `'`", child.toSql())); List<String> fieldNames = node.getFieldNames(); Type tmpType = child.getType(); for (String fieldName : fieldNames) { StructType structType = (StructType) tmpType; StructField structField = structType.getField(fieldName); if (structField == null) { throw new SemanticException("Struct subfield '%s' cannot be resolved", fieldName); } tmpType = structField.getType(); } node.setType(tmpType); return null; } @Override public Void visitSlot(SlotRef node, Scope scope) { ResolvedField resolvedField = scope.resolveField(node); node.setType(resolvedField.getField().getType()); node.setTblName(resolvedField.getField().getRelationAlias()); if (node.getType().isStructType()) { node.setCol(resolvedField.getField().getName()); node.setLabel(resolvedField.getField().getName()); if (resolvedField.getField().getTmpUsedStructFieldPos().size() > 0) { 
node.setUsedStructFieldPos(resolvedField.getField().getTmpUsedStructFieldPos()); node.resetStructInfo(); } } handleResolvedField(node, resolvedField); return null; } @Override public Void visitFieldReference(FieldReference node, Scope scope) { Field field = scope.getRelationFields().getFieldByIndex(node.getFieldIndex()); node.setType(field.getType()); return null; } @Override public Void visitArrayExpr(ArrayExpr node, Scope scope) { if (!node.getChildren().isEmpty()) { try { Type targetItemType; if (node.getType() != null) { targetItemType = ((ArrayType) node.getType()).getItemType(); } else { targetItemType = TypeManager.getCommonSuperType( node.getChildren().stream().map(Expr::getType).collect(Collectors.toList())); } for (int i = 0; i < node.getChildren().size(); i++) { if (!node.getChildren().get(i).getType().matchesType(targetItemType)) { node.castChild(targetItemType, i); } } node.setType(new ArrayType(targetItemType)); } catch (AnalysisException e) { throw new SemanticException(e.getMessage()); } } else { node.setType(Type.ARRAY_NULL); } return null; } @Override public Void visitCollectionElementExpr(CollectionElementExpr node, Scope scope) { Expr expr = node.getChild(0); Expr subscript = node.getChild(1); if (!expr.getType().isArrayType() && !expr.getType().isMapType()) { throw new SemanticException("cannot subscript type " + expr.getType() + " because it is not an array or a map"); } if (expr.getType().isArrayType()) { if (!subscript.getType().isNumericType()) { throw new SemanticException("array subscript must have type integer"); } try { if (subscript.getType().getPrimitiveType() != PrimitiveType.INT) { node.castChild(Type.INT, 1); } node.setType(((ArrayType) expr.getType()).getItemType()); } catch (AnalysisException e) { throw new SemanticException(e.getMessage()); } } else { try { if (subscript.getType().getPrimitiveType() != ((MapType) expr.getType()).getKeyType().getPrimitiveType()) { node.castChild(((MapType) expr.getType()).getKeyType(), 1); } 
node.setType(((MapType) expr.getType()).getValueType()); } catch (AnalysisException e) { throw new SemanticException(e.getMessage()); } } return null; } @Override public Void visitArraySliceExpr(ArraySliceExpr node, Scope scope) { if (!node.getChild(0).getType().isArrayType()) { throw new SemanticException("cannot subscript type" + node.getChild(0).getType() + " because it is not an array"); } node.setType(node.getChild(0).getType()); return null; } @Override public Void visitArrowExpr(ArrowExpr node, Scope scope) { Expr item = node.getChild(0); Expr key = node.getChild(1); if (!key.isLiteral() || !key.getType().isStringType()) { throw new SemanticException("right operand of -> should be string literal, but got " + key); } if (!item.getType().isJsonType()) { throw new SemanticException( "-> operator could only be used for json column, but got " + item.getType()); } node.setType(Type.JSON); return null; } @Override public Void visitLambdaFunctionExpr(LambdaFunctionExpr node, Scope scope) { if (scope.getLambdaInputs().size() == 0) { throw new SemanticException("Lambda Functions can only be used in high-order functions with arrays."); } if (scope.getLambdaInputs().size() != node.getChildren().size() - 1) { throw new SemanticException("Lambda arguments should equal to lambda input arrays."); } Set<String> set = new HashSet<>(); List<LambdaArgument> args = Lists.newArrayList(); for (int i = 1; i < node.getChildren().size(); ++i) { args.add((LambdaArgument) node.getChild(i)); String name = ((LambdaArgument) node.getChild(i)).getName(); if (set.contains(name)) { throw new SemanticException("Lambda argument: " + name + " is duplicated."); } set.add(name); ((LambdaArgument) node.getChild(i)).setNullable(scope.getLambdaInputs().get(i - 1).isNullable()); node.getChild(i).setType(scope.getLambdaInputs().get(i - 1).getType()); } Scope lambdaScope = new Scope(args, scope); ExpressionAnalyzer.analyzeExpression(node.getChild(0), this.analyzeState, lambdaScope, this.session); 
node.setType(Type.FUNCTION); scope.clearLambdaInputs(); return null; } @Override public Void visitCompoundPredicate(CompoundPredicate node, Scope scope) { for (int i = 0; i < node.getChildren().size(); i++) { Type type = node.getChild(i).getType(); if (!type.isBoolean() && !type.isNull()) { throw new SemanticException("Operand '%s' part of predicate " + "'%s' should return type 'BOOLEAN' but returns type '%s'.", AstToStringBuilder.toString(node), AstToStringBuilder.toString(node.getChild(i)), type.toSql()); } } node.setType(Type.BOOLEAN); return null; } @Override public Void visitBetweenPredicate(BetweenPredicate node, Scope scope) { predicateBaseAndCheck(node); List<Type> list = node.getChildren().stream().map(Expr::getType).collect(Collectors.toList()); Type compatibleType = TypeManager.getCompatibleTypeForBetweenAndIn(list); for (Type type : list) { if (!Type.canCastTo(type, compatibleType)) { throw new SemanticException( "between predicate type " + type.toSql() + " with type " + compatibleType.toSql() + " is invalid."); } } return null; } @Override public Void visitBinaryPredicate(BinaryPredicate node, Scope scope) { Type type1 = node.getChild(0).getType(); Type type2 = node.getChild(1).getType(); Type compatibleType = TypeManager.getCompatibleTypeForBinary(node.getOp().isNotRangeComparison(), type1, type2); final String ERROR_MSG = "Column type %s does not support binary predicate operation."; if (!Type.canCastTo(type1, compatibleType)) { throw new SemanticException(String.format(ERROR_MSG, type1.toSql())); } if (!Type.canCastTo(type2, compatibleType)) { throw new SemanticException(String.format(ERROR_MSG, type1.toSql())); } node.setType(Type.BOOLEAN); return null; } @Override public Void visitArithmeticExpr(ArithmeticExpr node, Scope scope) { if (node.getOp().getPos() == ArithmeticExpr.OperatorPosition.BINARY_INFIX) { ArithmeticExpr.Operator op = node.getOp(); Type t1 = node.getChild(0).getType().getNumResultType(); Type t2 = 
node.getChild(1).getType().getNumResultType(); if (t1.isDecimalV3() || t2.isDecimalV3()) { try { node.rewriteDecimalOperation(); } catch (AnalysisException ex) { throw new SemanticException(ex.getMessage()); } Type lhsType = node.getChild(0).getType(); Type rhsType = node.getChild(1).getType(); Type resultType = node.getType(); Type[] args = {lhsType, rhsType}; Function fn = Expr.getBuiltinFunction(op.getName(), args, Function.CompareMode.IS_IDENTICAL); Function newFn = new ScalarFunction(fn.getFunctionName(), args, resultType, fn.hasVarArgs()); node.setType(resultType); node.setFn(newFn); return null; } Type lhsType; Type rhsType; switch (op) { case MULTIPLY: case ADD: case SUBTRACT: lhsType = ArithmeticExpr.getBiggerType(ArithmeticExpr.getCommonType(t1, t2)); rhsType = lhsType; break; case MOD: lhsType = ArithmeticExpr.getCommonType(t1, t2); rhsType = lhsType; break; case DIVIDE: lhsType = ArithmeticExpr.getCommonType(t1, t2); if (lhsType.isFixedPointType()) { lhsType = Type.DOUBLE; } rhsType = lhsType; break; case INT_DIVIDE: case BITAND: case BITOR: case BITXOR: lhsType = ArithmeticExpr.getCommonType(t1, t2); if (!lhsType.isFixedPointType()) { lhsType = Type.BIGINT; } rhsType = lhsType; break; case BIT_SHIFT_LEFT: case BIT_SHIFT_RIGHT: case BIT_SHIFT_RIGHT_LOGICAL: lhsType = t1; rhsType = Type.BIGINT; break; default: throw unsupportedException("Unknown arithmetic operation " + op + " in: " + node); } if (node.getChild(0).getType().equals(Type.NULL) && node.getChild(1).getType().equals(Type.NULL)) { lhsType = Type.NULL; rhsType = Type.NULL; } if (!Type.NULL.equals(node.getChild(0).getType()) && !Type.canCastTo(t1, lhsType)) { throw new SemanticException( "cast type " + node.getChild(0).getType().toSql() + " with type " + lhsType.toSql() + " is invalid."); } if (!Type.NULL.equals(node.getChild(1).getType()) && !Type.canCastTo(t2, rhsType)) { throw new SemanticException( "cast type " + node.getChild(1).getType().toSql() + " with type " + rhsType.toSql() + " is 
invalid."); } Function fn = Expr.getBuiltinFunction(op.getName(), new Type[] {lhsType, rhsType}, Function.CompareMode.IS_SUPERTYPE_OF); /* * commonType is the common type of the parameters of the function, * and fn.getReturnType() is the return type of the function after execution * So we use fn.getReturnType() as node type */ node.setType(fn.getReturnType()); node.setFn(fn); } else if (node.getOp().getPos() == ArithmeticExpr.OperatorPosition.UNARY_PREFIX) { Function fn = Expr.getBuiltinFunction( node.getOp().getName(), new Type[] {Type.BIGINT}, Function.CompareMode.IS_SUPERTYPE_OF); node.setType(Type.BIGINT); node.setFn(fn); } else if (node.getOp().getPos() == ArithmeticExpr.OperatorPosition.UNARY_POSTFIX) { throw unsupportedException("not yet implemented: expression analyzer for " + node.getClass().getName()); } else { throw unsupportedException("not yet implemented: expression analyzer for " + node.getClass().getName()); } return null; } List<String> addDateFunctions = Lists.newArrayList(FunctionSet.DATE_ADD, FunctionSet.ADDDATE, FunctionSet.DAYS_ADD, FunctionSet.TIMESTAMPADD); List<String> subDateFunctions = Lists.newArrayList(FunctionSet.DATE_SUB, FunctionSet.SUBDATE, FunctionSet.DAYS_SUB); @Override public Void visitTimestampArithmeticExpr(TimestampArithmeticExpr node, Scope scope) { node.setChild(0, TypeManager.addCastExpr(node.getChild(0), Type.DATETIME)); String funcOpName; if (node.getFuncName() != null) { if (ADD_DATE_FUNCTIONS.contains(node.getFuncName())) { funcOpName = String.format("%sS_%s", node.getTimeUnitIdent(), "add"); } else if (SUB_DATE_FUNCTIONS.contains(node.getFuncName())) { funcOpName = String.format("%sS_%s", node.getTimeUnitIdent(), "sub"); } else { node.setChild(1, TypeManager.addCastExpr(node.getChild(1), Type.DATETIME)); funcOpName = String.format("%sS_%s", node.getTimeUnitIdent(), "diff"); } } else { funcOpName = String.format("%sS_%s", node.getTimeUnitIdent(), (node.getOp() == ArithmeticExpr.Operator.ADD) ? 
"add" : "sub"); } Type[] argumentTypes = node.getChildren().stream().map(Expr::getType) .toArray(Type[]::new); Function fn = Expr.getBuiltinFunction(funcOpName.toLowerCase(), argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF); if (fn == null) { throw new SemanticException("No matching function with signature: %s(%s).", funcOpName, Joiner.on(", ") .join(Arrays.stream(argumentTypes).map(Type::toSql).collect(Collectors.toList()))); } node.setType(fn.getReturnType()); node.setFn(fn); return null; } @Override public Void visitExistsPredicate(ExistsPredicate node, Scope scope) { predicateBaseAndCheck(node); return null; } @Override public Void visitInPredicate(InPredicate node, Scope scope) { predicateBaseAndCheck(node); List<Expr> queryExpressions = Lists.newArrayList(); node.collect(arg -> arg instanceof Subquery, queryExpressions); if (queryExpressions.size() > 0 && node.getChildren().size() > 2) { throw new SemanticException("In Predicate only support literal expression list"); } List<Type> list = node.getChildren().stream().map(Expr::getType).collect(Collectors.toList()); Type compatibleType = TypeManager.getCompatibleTypeForBetweenAndIn(list); for (Type type : list) { if (type.isJsonType()) { throw new SemanticException("InPredicate of JSON is not supported"); } if (!Type.canCastTo(type, compatibleType)) { throw new SemanticException( "in predicate type " + type.toSql() + " with type " + compatibleType.toSql() + " is invalid."); } } return null; } @Override public Void visitMultiInPredicate(MultiInPredicate node, Scope scope) { predicateBaseAndCheck(node); List<Type> leftTypes = node.getChildren().stream().limit(node.getNumberOfColumns()).map(Expr::getType) .collect(Collectors.toList()); Subquery inSubquery = (Subquery) node.getChild(node.getNumberOfColumns()); List<Type> rightTypes = inSubquery.getQueryStatement().getQueryRelation().getOutputExpression().stream().map(Expr::getType). 
collect(Collectors.toList()); if (leftTypes.size() != rightTypes.size()) { throw new SemanticException( "subquery must return the same number of columns as provided by the IN predicate"); } for (int i = 0; i < rightTypes.size(); ++i) { if (leftTypes.get(i).isJsonType() || rightTypes.get(i).isJsonType() || leftTypes.get(i).isMapType() || rightTypes.get(i).isMapType() || leftTypes.get(i).isStructType() || rightTypes.get(i).isStructType()) { throw new SemanticException("InPredicate of JSON, Map, Struct types is not supported"); } if (!Type.canCastTo(leftTypes.get(i), rightTypes.get(i))) { throw new SemanticException( "in predicate type " + leftTypes.get(i).toSql() + " with type " + rightTypes.get(i).toSql() + " is invalid."); } } return null; } @Override public Void visitLiteral(LiteralExpr node, Scope scope) { if (node instanceof LargeIntLiteral) { BigInteger value = ((LargeIntLiteral) node).getValue(); if (value.compareTo(LargeIntLiteral.LARGE_INT_MIN) < 0 || value.compareTo(LargeIntLiteral.LARGE_INT_MAX) > 0) { throw new SemanticException("Number Overflow. 
literal: " + value); } } return null; } @Override public Void visitIsNullPredicate(IsNullPredicate node, Scope scope) { predicateBaseAndCheck(node); return null; } @Override public Void visitLikePredicate(LikePredicate node, Scope scope) { predicateBaseAndCheck(node); Type type1 = node.getChild(0).getType(); Type type2 = node.getChild(1).getType(); if (!type1.isStringType() && !type1.isNull()) { throw new SemanticException( "left operand of " + node.getOp().toString() + " must be of type STRING: " + AstToStringBuilder.toString(node)); } if (!type2.isStringType() && !type2.isNull()) { throw new SemanticException( "right operand of " + node.getOp().toString() + " must be of type STRING: " + AstToStringBuilder.toString(node)); } if (LikePredicate.Operator.REGEXP.equals(node.getOp()) && !type2.isNull() && node.getChild(1).isLiteral()) { try { Pattern.compile(((StringLiteral) node.getChild(1)).getValue()); } catch (PatternSyntaxException e) { throw new SemanticException( "Invalid regular expression in '" + AstToStringBuilder.toString(node) + "'"); } } return null; } private void predicateBaseAndCheck(Predicate node) { node.setType(Type.BOOLEAN); for (Expr expr : node.getChildren()) { if (expr.getType().isOnlyMetricType() || (expr.getType().isComplexType() && !(node instanceof IsNullPredicate))) { throw new SemanticException( "HLL, BITMAP, PERCENTILE and ARRAY, MAP, STRUCT type couldn't as Predicate"); } } } @Override public Void visitCastExpr(CastExpr cast, Scope context) { Type castType; if (cast.isImplicit()) { castType = cast.getType(); } else { castType = cast.getTargetTypeDef().getType(); } if (!Type.canCastTo(cast.getChild(0).getType(), castType)) { throw new SemanticException("Invalid type cast from " + cast.getChild(0).getType().toSql() + " to " + castType.toSql() + " in sql `" + AstToStringBuilder.toString(cast.getChild(0)).replace("%", "%%") + "`"); } cast.setType(castType); return null; } @Override private Function getStrToDateFunction(FunctionCallExpr node, 
Type[] argumentTypes) { /* * @TODO: Determine the return type of this function * If is format is constant and don't contains time part, return date type, to compatible with mysql. * In fact we don't want to support str_to_date return date like mysql, reason: * 1. The return type of FE/BE str_to_date function signature is datetime, return date * let type different, it's will throw unpredictable error * 2. Support return date and datetime at same time in one function is complicated. * 3. The meaning of the function is confusing. In mysql, will return date if format is a constant * string and it's not contains "%H/%M/%S" pattern, but it's a trick logic, if format is a variable * expression, like: str_to_date(col1, col2), and the col2 is '%Y%m%d', the result always be * datetime. */ Function fn = Expr.getBuiltinFunction(node.getFnName().getFunction(), argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF); if (fn == null) { return null; } if (!node.getChild(1).isConstant()) { return fn; } ExpressionMapping expressionMapping = new ExpressionMapping(new Scope(RelationId.anonymous(), new RelationFields()), com.google.common.collect.Lists.newArrayList()); ScalarOperator format = SqlToScalarOperatorTranslator.translate(node.getChild(1), expressionMapping, new ColumnRefFactory()); if (format.isConstantRef() && !HAS_TIME_PART.matcher(format.toString()).matches()) { return Expr.getBuiltinFunction("str2date", argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF); } return fn; } Function getDecimalV3Function(FunctionCallExpr node, Type[] argumentTypes) { Function fn; String fnName = node.getFnName().getFunction(); Type commonType = DecimalV3FunctionAnalyzer.normalizeDecimalArgTypes(argumentTypes, fnName); fn = Expr.getBuiltinFunction(fnName, argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF); if (fn == null) { fn = AnalyzerUtils.getUdfFunction(session, node.getFnName(), argumentTypes); } if (fn == null) { throw new SemanticException("No matching 
function with signature: %s(%s).", fnName, node.getParams().isStar() ? "*" : Joiner.on(", ") .join(Arrays.stream(argumentTypes).map(Type::toSql).collect(Collectors.toList()))); } if (DecimalV3FunctionAnalyzer.DECIMAL_AGG_FUNCTION.contains(fnName)) { Type argType = node.getChild(0).getType(); if (DecimalV3FunctionAnalyzer.DECIMAL_AGG_VARIANCE_STDDEV_TYPE .contains(fnName) && argType.isDecimalV3()) { argType = ScalarType.createDecimalV3Type(PrimitiveType.DECIMAL128, 38, 9); node.setChild(0, TypeManager.addCastExpr(node.getChild(0), argType)); } fn = DecimalV3FunctionAnalyzer .rectifyAggregationFunction((AggregateFunction) fn, argType, commonType); } else if (DecimalV3FunctionAnalyzer.DECIMAL_UNARY_FUNCTION_SET.contains(fnName) || DecimalV3FunctionAnalyzer.DECIMAL_IDENTICAL_TYPE_FUNCTION_SET.contains(fnName) || FunctionSet.IF.equals(fnName) || FunctionSet.MAX_BY.equals(fnName)) { List<Type> argTypes; if (FunctionSet.MONEY_FORMAT.equals(fnName)) { argTypes = Arrays.asList(argumentTypes); } else { argTypes = Arrays.stream(fn.getArgs()).map(t -> t.isDecimalV3() ? 
commonType : t) .collect(Collectors.toList()); } Type returnType = fn.getReturnType(); if (returnType.isDecimalV3() && commonType.isValid()) { returnType = commonType; } if (FunctionSet.MAX_BY.equals(fnName)) { AggregateFunction newFn = new AggregateFunction(fn.getFunctionName(), Arrays.asList(argumentTypes), returnType, Type.VARCHAR, fn.hasVarArgs()); newFn.setFunctionId(fn.getFunctionId()); newFn.setChecksum(fn.getChecksum()); newFn.setBinaryType(fn.getBinaryType()); newFn.setHasVarArgs(fn.hasVarArgs()); newFn.setId(fn.getId()); newFn.setUserVisible(fn.isUserVisible()); newFn.setisAnalyticFn(true); fn = newFn; return fn; } ScalarFunction newFn = new ScalarFunction(fn.getFunctionName(), argTypes, returnType, fn.getLocation(), ((ScalarFunction) fn).getSymbolName(), ((ScalarFunction) fn).getPrepareFnSymbol(), ((ScalarFunction) fn).getCloseFnSymbol()); newFn.setFunctionId(fn.getFunctionId()); newFn.setChecksum(fn.getChecksum()); newFn.setBinaryType(fn.getBinaryType()); newFn.setHasVarArgs(fn.hasVarArgs()); newFn.setId(fn.getId()); newFn.setUserVisible(fn.isUserVisible()); fn = newFn; } else if (FunctionSet.decimalRoundFunctions.contains(fnName)) { List<Type> argTypes = Arrays.stream(fn.getArgs()).map(t -> t.isDecimalV3() ? 
commonType : t) .collect(Collectors.toList()); fn = DecimalV3FunctionAnalyzer.getFunctionOfRound(node, fn, argTypes); } return fn; } @Override public Void visitGroupingFunctionCall(GroupingFunctionCallExpr node, Scope scope) { if (node.getChildren().size() < 1) { throw new SemanticException("GROUPING functions required at least one parameters"); } if (node.getChildren().stream().anyMatch(e -> !(e instanceof SlotRef))) { throw new SemanticException("grouping functions only support column."); } Type[] childTypes = new Type[1]; childTypes[0] = Type.BIGINT; Function fn = Expr.getBuiltinFunction(node.getFnName().getFunction(), childTypes, Function.CompareMode.IS_IDENTICAL); node.setFn(fn); node.setType(fn.getReturnType()); return null; } @Override public Void visitCaseWhenExpr(CaseExpr node, Scope context) { int start = 0; int end = node.getChildren().size(); Expr caseExpr = null; Expr elseExpr = null; if (node.hasCaseExpr()) { caseExpr = node.getChild(0); start++; } if (node.hasElseExpr()) { elseExpr = node.getChild(end - 1); end--; } if (node.getChildren().stream().anyMatch(d -> !d.getType().isScalarType())) { throw new SemanticException("case-when only support scalar type"); } List<Type> whenTypes = Lists.newArrayList(); if (null != caseExpr) { whenTypes.add(caseExpr.getType()); } for (int i = start; i < end; i = i + 2) { whenTypes.add(node.getChild(i).getType()); } Type compatibleType = Type.NULL; if (null != caseExpr) { compatibleType = TypeManager.getCompatibleTypeForCaseWhen(whenTypes); } for (Type type : whenTypes) { if (!Type.canCastTo(type, compatibleType)) { throw new SemanticException("Invalid when type cast " + type.toSql() + " to " + compatibleType.toSql()); } } List<Type> thenTypes = Lists.newArrayList(); for (int i = start + 1; i < end; i = i + 2) { thenTypes.add(node.getChild(i).getType()); } if (null != elseExpr) { thenTypes.add(elseExpr.getType()); } Type returnType = thenTypes.stream().allMatch(Type.NULL::equals) ? 
Type.BOOLEAN : TypeManager.getCompatibleTypeForCaseWhen(thenTypes); for (Type type : thenTypes) { if (!Type.canCastTo(type, returnType)) { throw new SemanticException("Invalid then type cast " + type.toSql() + " to " + returnType.toSql()); } } node.setType(returnType); return null; } @Override public Void visitSubquery(Subquery node, Scope context) { QueryAnalyzer queryAnalyzer = new QueryAnalyzer(session); queryAnalyzer.analyze(node.getQueryStatement(), context); node.setType(node.getQueryStatement().getQueryRelation().getRelationFields().getFieldByIndex(0).getType()); return null; } @Override public Void visitAnalyticExpr(AnalyticExpr node, Scope context) { visit(node.getFnCall(), context); node.setType(node.getFnCall().getType()); if (node.getWindow() != null) { if (node.getWindow().getLeftBoundary() != null && node.getWindow().getLeftBoundary().getExpr() != null) { visit(node.getWindow().getLeftBoundary().getExpr(), context); } if (node.getWindow().getRightBoundary() != null && node.getWindow().getRightBoundary().getExpr() != null) { visit(node.getWindow().getRightBoundary().getExpr(), context); } } node.getPartitionExprs().forEach(e -> visit(e, context)); node.getOrderByElements().stream().map(OrderByElement::getExpr).forEach(e -> visit(e, context)); verifyAnalyticExpression(node); return null; } @Override public Void visitInformationFunction(InformationFunction node, Scope context) { String funcType = node.getFuncType(); if (funcType.equalsIgnoreCase("DATABASE") || funcType.equalsIgnoreCase("SCHEMA")) { node.setType(Type.VARCHAR); node.setStrValue(ClusterNamespace.getNameFromFullName(session.getDatabase())); } else if (funcType.equalsIgnoreCase("USER")) { node.setType(Type.VARCHAR); node.setStrValue(session.getUserIdentity().toString()); } else if (funcType.equalsIgnoreCase("CURRENT_USER")) { node.setType(Type.VARCHAR); node.setStrValue(session.getCurrentUserIdentity().toString()); } else if (funcType.equalsIgnoreCase("CURRENT_ROLE")) { 
node.setType(Type.VARCHAR); PrivilegeManager manager = session.getGlobalStateMgr().getPrivilegeManager(); List<String> roleName = new ArrayList<>(); try { for (Long roleId : session.getCurrentRoleIds()) { RolePrivilegeCollection rolePrivilegeCollection = manager.getRolePrivilegeCollectionUnlocked(roleId, true); roleName.add(rolePrivilegeCollection.getName()); } } catch (PrivilegeException e) { throw new SemanticException(e.getMessage()); } if (roleName.isEmpty()) { node.setStrValue("NONE"); } else { node.setStrValue(Joiner.on(", ").join(roleName)); } } else if (funcType.equalsIgnoreCase("CONNECTION_ID")) { node.setType(Type.BIGINT); node.setIntValue(session.getConnectionId()); node.setStrValue(""); } return null; } @Override public Void visitVariableExpr(VariableExpr node, Scope context) { try { if (node.getSetType().equals(SetType.USER)) { UserVariable userVariable = session.getUserVariables(node.getName()); if (userVariable == null) { node.setType(Type.STRING); node.setIsNull(); return null; } Type variableType = userVariable.getEvaluatedExpression().getType(); node.setType(variableType); if (userVariable.getEvaluatedExpression() instanceof NullLiteral) { node.setIsNull(); } else { node.setValue(userVariable.getEvaluatedExpression().getRealObjectValue()); } } else { VariableMgr.fillValue(session.getSessionVariable(), node); if (!Strings.isNullOrEmpty(node.getName()) && node.getName().equalsIgnoreCase(SessionVariable.SQL_MODE)) { node.setType(Type.VARCHAR); node.setValue(SqlModeHelper.decode((long) node.getValue())); } } } catch (AnalysisException | DdlException e) { throw new SemanticException(e.getMessage()); } return null; } @Override public Void visitDefaultValueExpr(DefaultValueExpr node, Scope context) { node.setType(Type.VARCHAR); return null; } @Override public Void visitCloneExpr(CloneExpr node, Scope context) { return null; } }
class Visitor extends AstVisitor<Void, Scope> { private static final List<String> ADD_DATE_FUNCTIONS = Lists.newArrayList(FunctionSet.DATE_ADD, FunctionSet.ADDDATE, FunctionSet.DAYS_ADD, FunctionSet.TIMESTAMPADD); private static final List<String> SUB_DATE_FUNCTIONS = Lists.newArrayList(FunctionSet.DATE_SUB, FunctionSet.SUBDATE, FunctionSet.DAYS_SUB); private final AnalyzeState analyzeState; private final ConnectContext session; public Visitor(AnalyzeState analyzeState, ConnectContext session) { this.analyzeState = analyzeState; this.session = session; } @Override public Void visitExpression(Expr node, Scope scope) { throw new SemanticException("not yet implemented: expression analyzer for " + node.getClass().getName(), node.getPos()); } private void handleResolvedField(SlotRef slot, ResolvedField resolvedField) { analyzeState.addColumnReference(slot, FieldId.from(resolvedField)); } @Override public Void visitSubfieldExpr(SubfieldExpr node, Scope scope) { Expr child = node.getChild(0); if (!child.getType().isStructType()) { throw new SemanticException(child.toSql() + " must be a struct type, check if you are using `'`", child.getPos()); } List<String> fieldNames = node.getFieldNames(); Type tmpType = child.getType(); for (String fieldName : fieldNames) { StructType structType = (StructType) tmpType; StructField structField = structType.getField(fieldName); if (structField == null) { throw new SemanticException(String.format("Struct subfield '%s' cannot be resolved", fieldName), node.getPos()); } tmpType = structField.getType(); } node.setType(tmpType); return null; } @Override public Void visitSlot(SlotRef node, Scope scope) { ResolvedField resolvedField = scope.resolveField(node); node.setType(resolvedField.getField().getType()); node.setTblName(resolvedField.getField().getRelationAlias()); if (node.getType().isStructType()) { node.setCol(resolvedField.getField().getName()); node.setLabel(resolvedField.getField().getName()); if 
(resolvedField.getField().getTmpUsedStructFieldPos().size() > 0) { node.setUsedStructFieldPos(resolvedField.getField().getTmpUsedStructFieldPos()); node.resetStructInfo(); } } handleResolvedField(node, resolvedField); return null; } @Override public Void visitFieldReference(FieldReference node, Scope scope) { Field field = scope.getRelationFields().getFieldByIndex(node.getFieldIndex()); node.setType(field.getType()); return null; } @Override public Void visitArrayExpr(ArrayExpr node, Scope scope) { if (!node.getChildren().isEmpty()) { try { Type targetItemType; if (node.getType() != null) { targetItemType = ((ArrayType) node.getType()).getItemType(); } else { targetItemType = TypeManager.getCommonSuperType( node.getChildren().stream().map(Expr::getType).collect(Collectors.toList())); } for (int i = 0; i < node.getChildren().size(); i++) { if (!node.getChildren().get(i).getType().matchesType(targetItemType)) { node.castChild(targetItemType, i); } } node.setType(new ArrayType(targetItemType)); } catch (AnalysisException e) { throw new SemanticException(e.getMessage()); } } else { node.setType(Type.ARRAY_NULL); } return null; } @Override public Void visitMapExpr(MapExpr node, Scope scope) { if (!node.getChildren().isEmpty()) { Type keyType = Type.NULL; Type valueType = Type.NULL; if (node.getKeyExpr() != null) { keyType = node.getKeyExpr().getType(); } if (node.getValueExpr() != null) { valueType = node.getValueExpr().getType(); } node.setType(new MapType(keyType, valueType)); } else { node.setType(new MapType(Type.NULL, Type.NULL)); } return null; } @Override public Void visitCollectionElementExpr(CollectionElementExpr node, Scope scope) { Expr expr = node.getChild(0); Expr subscript = node.getChild(1); if (!expr.getType().isArrayType() && !expr.getType().isMapType()) { throw new SemanticException("cannot subscript type " + expr.getType() + " because it is not an array or a map", expr.getPos()); } if (expr.getType().isArrayType()) { if 
(!subscript.getType().isNumericType()) { throw new SemanticException("array subscript must have type integer", subscript.getPos()); } try { if (subscript.getType().getPrimitiveType() != PrimitiveType.INT) { node.castChild(Type.INT, 1); } node.setType(((ArrayType) expr.getType()).getItemType()); } catch (AnalysisException e) { throw new SemanticException(e.getMessage()); } } else { try { if (subscript.getType().getPrimitiveType() != ((MapType) expr.getType()).getKeyType().getPrimitiveType()) { node.castChild(((MapType) expr.getType()).getKeyType(), 1); } node.setType(((MapType) expr.getType()).getValueType()); } catch (AnalysisException e) { throw new SemanticException(e.getMessage()); } } return null; } @Override public Void visitArraySliceExpr(ArraySliceExpr node, Scope scope) { if (!node.getChild(0).getType().isArrayType()) { throw new SemanticException("cannot subscript type" + node.getChild(0).getType() + " because it is not an array", node.getChild(0).getPos()); } node.setType(node.getChild(0).getType()); return null; } @Override public Void visitArrowExpr(ArrowExpr node, Scope scope) { Expr item = node.getChild(0); Expr key = node.getChild(1); if (!key.isLiteral() || !key.getType().isStringType()) { throw new SemanticException("right operand of -> should be string literal, but got " + key, key.getPos()); } if (!item.getType().isJsonType()) { throw new SemanticException( "-> operator could only be used for json column, but got " + item.getType(), item.getPos()); } node.setType(Type.JSON); return null; } @Override public Void visitLambdaFunctionExpr(LambdaFunctionExpr node, Scope scope) { if (scope.getLambdaInputs().size() == 0) { throw new SemanticException( "Lambda Functions can only be used in high-order functions with arrays/maps", node.getPos()); } if (scope.getLambdaInputs().size() != node.getChildren().size() - 1) { throw new SemanticException("Lambda arguments should equal to lambda input arrays", node.getPos()); } Set<String> set = new HashSet<>(); 
List<LambdaArgument> args = Lists.newArrayList(); for (int i = 1; i < node.getChildren().size(); ++i) { args.add((LambdaArgument) node.getChild(i)); String name = ((LambdaArgument) node.getChild(i)).getName(); if (set.contains(name)) { throw new SemanticException("Lambda argument: " + name + " is duplicated", node.getChild(i).getPos()); } set.add(name); ((LambdaArgument) node.getChild(i)).setNullable(scope.getLambdaInputs().get(i - 1).isNullable()); node.getChild(i).setType(scope.getLambdaInputs().get(i - 1).getType()); } Scope lambdaScope = new Scope(args, scope); ExpressionAnalyzer.analyzeExpression(node.getChild(0), this.analyzeState, lambdaScope, this.session); node.setType(Type.FUNCTION); scope.clearLambdaInputs(); return null; } @Override public Void visitCompoundPredicate(CompoundPredicate node, Scope scope) { for (int i = 0; i < node.getChildren().size(); i++) { Type type = node.getChild(i).getType(); if (!type.isBoolean() && !type.isNull()) { String msg = String.format("Operand '%s' part of predicate " + "'%s' should return type 'BOOLEAN' but returns type '%s'", AstToStringBuilder.toString(node), AstToStringBuilder.toString(node.getChild(i)), type.toSql()); throw new SemanticException(msg, node.getChild(i).getPos()); } } node.setType(Type.BOOLEAN); return null; } @Override public Void visitBetweenPredicate(BetweenPredicate node, Scope scope) { predicateBaseAndCheck(node); List<Type> list = node.getChildren().stream().map(Expr::getType).collect(Collectors.toList()); Type compatibleType = TypeManager.getCompatibleTypeForBetweenAndIn(list); for (Type type : list) { if (!Type.canCastTo(type, compatibleType)) { throw new SemanticException( "between predicate type " + type.toSql() + " with type " + compatibleType.toSql() + " is invalid", node.getPos()); } } return null; } @Override public Void visitBinaryPredicate(BinaryPredicate node, Scope scope) { Type type1 = node.getChild(0).getType(); Type type2 = node.getChild(1).getType(); Type compatibleType = 
TypeManager.getCompatibleTypeForBinary(node.getOp().isNotRangeComparison(), type1, type2); final String ERROR_MSG = "Column type %s does not support binary predicate operation"; if (!Type.canCastTo(type1, compatibleType)) { throw new SemanticException(String.format(ERROR_MSG, type1.toSql()), node.getPos()); } if (!Type.canCastTo(type2, compatibleType)) { throw new SemanticException(String.format(ERROR_MSG, type1.toSql()), node.getPos()); } node.setType(Type.BOOLEAN); return null; } @Override public Void visitArithmeticExpr(ArithmeticExpr node, Scope scope) { if (node.getOp().getPos() == ArithmeticExpr.OperatorPosition.BINARY_INFIX) { ArithmeticExpr.Operator op = node.getOp(); Type t1 = node.getChild(0).getType().getNumResultType(); Type t2 = node.getChild(1).getType().getNumResultType(); if (t1.isDecimalV3() || t2.isDecimalV3()) { try { node.rewriteDecimalOperation(); } catch (AnalysisException ex) { throw new SemanticException(ex.getMessage()); } Type lhsType = node.getChild(0).getType(); Type rhsType = node.getChild(1).getType(); Type resultType = node.getType(); Type[] args = {lhsType, rhsType}; Function fn = Expr.getBuiltinFunction(op.getName(), args, Function.CompareMode.IS_IDENTICAL); Function newFn = new ScalarFunction(fn.getFunctionName(), args, resultType, fn.hasVarArgs()); node.setType(resultType); node.setFn(newFn); return null; } Type lhsType; Type rhsType; switch (op) { case MULTIPLY: case ADD: case SUBTRACT: lhsType = ArithmeticExpr.getBiggerType(ArithmeticExpr.getCommonType(t1, t2)); rhsType = lhsType; break; case MOD: lhsType = ArithmeticExpr.getCommonType(t1, t2); rhsType = lhsType; break; case DIVIDE: lhsType = ArithmeticExpr.getCommonType(t1, t2); if (lhsType.isFixedPointType()) { lhsType = Type.DOUBLE; } rhsType = lhsType; break; case INT_DIVIDE: case BITAND: case BITOR: case BITXOR: lhsType = ArithmeticExpr.getCommonType(t1, t2); if (!lhsType.isFixedPointType()) { lhsType = Type.BIGINT; } rhsType = lhsType; break; case BIT_SHIFT_LEFT: case 
BIT_SHIFT_RIGHT: case BIT_SHIFT_RIGHT_LOGICAL: lhsType = t1; rhsType = Type.BIGINT; break; default: throw new SemanticException("Unknown arithmetic operation " + op + " in: " + node, node.getPos()); } if (node.getChild(0).getType().equals(Type.NULL) && node.getChild(1).getType().equals(Type.NULL)) { lhsType = Type.NULL; rhsType = Type.NULL; } if (lhsType.isInvalid() || rhsType.isInvalid()) { throw new SemanticException("Any function type can not cast to " + Type.INVALID.toSql()); } if (!Type.NULL.equals(node.getChild(0).getType()) && !Type.canCastTo(t1, lhsType)) { throw new SemanticException( "cast type " + node.getChild(0).getType().toSql() + " with type " + lhsType.toSql() + " is invalid", node.getPos()); } if (!Type.NULL.equals(node.getChild(1).getType()) && !Type.canCastTo(t2, rhsType)) { throw new SemanticException( "cast type " + node.getChild(1).getType().toSql() + " with type " + rhsType.toSql() + " is invalid", node.getPos()); } Function fn = Expr.getBuiltinFunction(op.getName(), new Type[] {lhsType, rhsType}, Function.CompareMode.IS_SUPERTYPE_OF); if (fn == null) { throw new SemanticException(String.format( "No matching function '%s' with operand types %s and %s", node.getOp().getName(), t1, t2)); } /* * commonType is the common type of the parameters of the function, * and fn.getReturnType() is the return type of the function after execution * So we use fn.getReturnType() as node type */ node.setType(fn.getReturnType()); node.setFn(fn); } else if (node.getOp().getPos() == ArithmeticExpr.OperatorPosition.UNARY_PREFIX) { Function fn = Expr.getBuiltinFunction( node.getOp().getName(), new Type[] {Type.BIGINT}, Function.CompareMode.IS_SUPERTYPE_OF); node.setType(Type.BIGINT); node.setFn(fn); } else if (node.getOp().getPos() == ArithmeticExpr.OperatorPosition.UNARY_POSTFIX) { throw new SemanticException("not yet implemented: expression analyzer for " + node.getClass().getName(), node.getPos()); } else { throw new SemanticException("not yet implemented: 
expression analyzer for " + node.getClass().getName(), node.getPos()); } return null; } @Override public Void visitTimestampArithmeticExpr(TimestampArithmeticExpr node, Scope scope) { node.setChild(0, TypeManager.addCastExpr(node.getChild(0), Type.DATETIME)); String funcOpName; if (node.getFuncName() != null) { if (ADD_DATE_FUNCTIONS.contains(node.getFuncName())) { funcOpName = String.format("%sS_%s", node.getTimeUnitIdent(), "add"); } else if (SUB_DATE_FUNCTIONS.contains(node.getFuncName())) { funcOpName = String.format("%sS_%s", node.getTimeUnitIdent(), "sub"); } else { node.setChild(1, TypeManager.addCastExpr(node.getChild(1), Type.DATETIME)); funcOpName = String.format("%sS_%s", node.getTimeUnitIdent(), "diff"); } } else { funcOpName = String.format("%sS_%s", node.getTimeUnitIdent(), (node.getOp() == ArithmeticExpr.Operator.ADD) ? "add" : "sub"); } Type[] argumentTypes = node.getChildren().stream().map(Expr::getType) .toArray(Type[]::new); Function fn = Expr.getBuiltinFunction(funcOpName.toLowerCase(), argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF); if (fn == null) { String msg = String.format("No matching function with signature: %s(%s)", funcOpName, Joiner.on(", ") .join(Arrays.stream(argumentTypes).map(Type::toSql).collect(Collectors.toList()))); throw new SemanticException(msg, node.getPos()); } node.setType(fn.getReturnType()); node.setFn(fn); return null; } @Override public Void visitExistsPredicate(ExistsPredicate node, Scope scope) { predicateBaseAndCheck(node); return null; } @Override public Void visitInPredicate(InPredicate node, Scope scope) { predicateBaseAndCheck(node); List<Expr> queryExpressions = Lists.newArrayList(); node.collect(arg -> arg instanceof Subquery, queryExpressions); if (queryExpressions.size() > 0 && node.getChildren().size() > 2) { throw new SemanticException("In Predicate only support literal expression list", node.getPos()); } List<Type> list = 
node.getChildren().stream().map(Expr::getType).collect(Collectors.toList()); Type compatibleType = TypeManager.getCompatibleTypeForBetweenAndIn(list); for (Expr child : node.getChildren()) { Type type = child.getType(); if (type.isJsonType()) { throw new SemanticException("InPredicate of JSON is not supported", child.getPos()); } if (!Type.canCastTo(type, compatibleType)) { throw new SemanticException( "in predicate type " + type.toSql() + " with type " + compatibleType.toSql() + " is invalid", child.getPos()); } } return null; } @Override public Void visitMultiInPredicate(MultiInPredicate node, Scope scope) { predicateBaseAndCheck(node); List<Type> leftTypes = node.getChildren().stream().limit(node.getNumberOfColumns()).map(Expr::getType) .collect(Collectors.toList()); Subquery inSubquery = (Subquery) node.getChild(node.getNumberOfColumns()); List<Type> rightTypes = inSubquery.getQueryStatement().getQueryRelation().getOutputExpression().stream().map(Expr::getType). collect(Collectors.toList()); if (leftTypes.size() != rightTypes.size()) { throw new SemanticException( "subquery must return the same number of columns as provided by the IN predicate", node.getPos()); } for (int i = 0; i < rightTypes.size(); ++i) { if (leftTypes.get(i).isJsonType() || rightTypes.get(i).isJsonType() || leftTypes.get(i).isMapType() || rightTypes.get(i).isMapType() || leftTypes.get(i).isStructType() || rightTypes.get(i).isStructType()) { throw new SemanticException("InPredicate of JSON, Map, Struct types is not supported"); } if (!Type.canCastTo(leftTypes.get(i), rightTypes.get(i))) { throw new SemanticException( "in predicate type " + leftTypes.get(i).toSql() + " with type " + rightTypes.get(i).toSql() + " is invalid"); } } return null; } @Override public Void visitLiteral(LiteralExpr node, Scope scope) { if (node instanceof LargeIntLiteral) { BigInteger value = ((LargeIntLiteral) node).getValue(); if (value.compareTo(LargeIntLiteral.LARGE_INT_MIN) < 0 || 
value.compareTo(LargeIntLiteral.LARGE_INT_MAX) > 0) { throw new SemanticException(PARSER_ERROR_MSG.numOverflow(value.toString()), node.getPos()); } } return null; } @Override public Void visitIsNullPredicate(IsNullPredicate node, Scope scope) { predicateBaseAndCheck(node); return null; } @Override public Void visitLikePredicate(LikePredicate node, Scope scope) { predicateBaseAndCheck(node); Type type1 = node.getChild(0).getType(); Type type2 = node.getChild(1).getType(); if (!type1.isStringType() && !type1.isNull()) { throw new SemanticException( "left operand of " + node.getOp().toString() + " must be of type STRING: " + AstToStringBuilder.toString(node), node.getPos()); } if (!type2.isStringType() && !type2.isNull()) { throw new SemanticException( "right operand of " + node.getOp().toString() + " must be of type STRING: " + AstToStringBuilder.toString(node), node.getPos()); } if (LikePredicate.Operator.REGEXP.equals(node.getOp()) && !type2.isNull() && node.getChild(1).isLiteral()) { try { Pattern.compile(((StringLiteral) node.getChild(1)).getValue()); } catch (PatternSyntaxException e) { throw new SemanticException( "Invalid regular expression in '" + AstToStringBuilder.toString(node) + "'", node.getPos()); } } return null; } private void predicateBaseAndCheck(Predicate node) { node.setType(Type.BOOLEAN); for (Expr expr : node.getChildren()) { if (expr.getType().isOnlyMetricType() || (expr.getType().isComplexType() && !(node instanceof IsNullPredicate))) { throw new SemanticException( "HLL, BITMAP, PERCENTILE and ARRAY, MAP, STRUCT type couldn't as Predicate", node.getPos()); } } } @Override public Void visitCastExpr(CastExpr cast, Scope context) { Type castType; if (cast.isImplicit()) { castType = cast.getType(); } else { castType = cast.getTargetTypeDef().getType(); } if (!Type.canCastTo(cast.getChild(0).getType(), castType)) { throw new SemanticException("Invalid type cast from " + cast.getChild(0).getType().toSql() + " to " + castType.toSql() + " in sql `" + 
AstToStringBuilder.toString(cast.getChild(0)).replace("%", "%%") + "`", cast.getPos()); } cast.setType(castType); return null; } @Override private void checkFunction(String fnName, FunctionCallExpr node) { switch (fnName) { case FunctionSet.TIME_SLICE: case FunctionSet.DATE_SLICE: if (!(node.getChild(1) instanceof IntLiteral)) { throw new SemanticException( fnName + " requires second parameter must be a constant interval", node.getPos()); } if (((IntLiteral) node.getChild(1)).getValue() <= 0) { throw new SemanticException( fnName + " requires second parameter must be greater than 0", node.getPos()); } break; case FunctionSet.ARRAY_FILTER: if (node.getChildren().size() != 2) { throw new SemanticException(fnName + " should have 2 array inputs or lambda functions", node.getPos()); } if (!node.getChild(0).getType().isArrayType() && !node.getChild(0).getType().isNull()) { throw new SemanticException("The first input of " + fnName + " should be an array or a lambda function", node.getPos()); } if (!node.getChild(1).getType().isArrayType() && !node.getChild(1).getType().isNull()) { throw new SemanticException("The second input of " + fnName + " should be an array or a lambda function", node.getPos()); } if (!Type.canCastTo(node.getChild(1).getType(), Type.ARRAY_BOOLEAN)) { throw new SemanticException("The second input of array_filter " + node.getChild(1).getType().toString() + " can't cast to ARRAY<BOOL>", node.getPos()); } break; case FunctionSet.ARRAY_SORTBY: if (node.getChildren().size() != 2) { throw new SemanticException(fnName + " should have 2 array inputs or lambda functions", node.getPos()); } if (!node.getChild(0).getType().isArrayType() && !node.getChild(0).getType().isNull()) { throw new SemanticException("The first input of " + fnName + " should be an array or a lambda function", node.getPos()); } if (!node.getChild(1).getType().isArrayType() && !node.getChild(1).getType().isNull()) { throw new SemanticException("The second input of " + fnName + " should be 
an array or a lambda function", node.getPos()); } break; case FunctionSet.ARRAY_CONCAT: if (node.getChildren().size() < 2) { throw new SemanticException(fnName + " should have at least two inputs", node.getPos()); } break; case FunctionSet.ARRAY_GENERATE: if (node.getChildren().size() < 1 || node.getChildren().size() > 3) { throw new SemanticException(fnName + " has wrong input numbers"); } for (Expr expr : node.getChildren()) { if ((expr instanceof SlotRef) && node.getChildren().size() != 3) { throw new SemanticException(fnName + " with IntColumn doesn't support default parameters"); } if (!(expr instanceof IntLiteral) && !(expr instanceof LargeIntLiteral) && !(expr instanceof SlotRef) && !(expr instanceof NullLiteral)) { throw new SemanticException(fnName + "'s parameter only support Integer"); } } break; case FunctionSet.MAP_FILTER: if (node.getChildren().size() != 2) { throw new SemanticException(fnName + " should have 2 inputs, " + "but there are just " + node.getChildren().size() + " inputs."); } if (!node.getChild(0).getType().isMapType() && !node.getChild(0).getType().isNull()) { throw new SemanticException("The first input of " + fnName + " should be a map or a lambda function."); } if (!node.getChild(1).getType().isArrayType() && !node.getChild(1).getType().isNull()) { throw new SemanticException("The second input of " + fnName + " should be a array or a lambda function."); } if (!Type.canCastTo(node.getChild(1).getType(), Type.ARRAY_BOOLEAN)) { throw new SemanticException("The second input of map_filter " + node.getChild(1).getType().toString() + " can't cast to ARRAY<BOOL>"); } break; } } private Function getStrToDateFunction(FunctionCallExpr node, Type[] argumentTypes) { /* * @TODO: Determine the return type of this function * If is format is constant and don't contains time part, return date type, to compatible with mysql. * In fact we don't want to support str_to_date return date like mysql, reason: * 1. 
The return type of FE/BE str_to_date function signature is datetime, return date * let type different, it's will throw unpredictable error * 2. Support return date and datetime at same time in one function is complicated. * 3. The meaning of the function is confusing. In mysql, will return date if format is a constant * string and it's not contains "%H/%M/%S" pattern, but it's a trick logic, if format is a variable * expression, like: str_to_date(col1, col2), and the col2 is '%Y%m%d', the result always be * datetime. */ Function fn = Expr.getBuiltinFunction(node.getFnName().getFunction(), argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF); if (fn == null) { return null; } if (!node.getChild(1).isConstant()) { return fn; } ExpressionMapping expressionMapping = new ExpressionMapping(new Scope(RelationId.anonymous(), new RelationFields()), com.google.common.collect.Lists.newArrayList()); ScalarOperator format = SqlToScalarOperatorTranslator.translate(node.getChild(1), expressionMapping, new ColumnRefFactory()); if (format.isConstantRef() && !HAS_TIME_PART.matcher(format.toString()).matches()) { return Expr.getBuiltinFunction("str2date", argumentTypes, Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF); } return fn; } private Function getArrayGenerateFunction(FunctionCallExpr node) { if (node.getChildren().size() == 1) { LiteralExpr secondParam = (LiteralExpr) node.getChild(0); node.clearChildren(); node.addChild(new IntLiteral(1)); node.addChild(secondParam); } if (node.getChildren().size() == 2) { int idx = 0; BigInteger[] childValues = new BigInteger[2]; Boolean hasNUll = false; for (Expr expr : node.getChildren()) { if (expr instanceof NullLiteral) { hasNUll = true; } else if (expr instanceof IntLiteral) { childValues[idx++] = BigInteger.valueOf(((IntLiteral) expr).getValue()); } else { childValues[idx++] = ((LargeIntLiteral) expr).getValue(); } } if (hasNUll || childValues[0].compareTo(childValues[1]) < 0) { node.addChild(new IntLiteral(1)); } else { 
node.addChild(new IntLiteral(-1)); } } Type[] argumentTypes = node.getChildren().stream().map(Expr::getType).toArray(Type[]::new); return Expr.getBuiltinFunction(FunctionSet.ARRAY_GENERATE, argumentTypes, Function.CompareMode.IS_SUPERTYPE_OF); } @Override public Void visitGroupingFunctionCall(GroupingFunctionCallExpr node, Scope scope) { if (node.getChildren().size() < 1) { throw new SemanticException("GROUPING functions required at least one parameters", node.getPos()); } if (node.getChildren().stream().anyMatch(e -> !(e instanceof SlotRef))) { throw new SemanticException("grouping functions only support column", node.getPos()); } Type[] childTypes = new Type[1]; childTypes[0] = Type.BIGINT; Function fn = Expr.getBuiltinFunction(node.getFnName().getFunction(), childTypes, Function.CompareMode.IS_IDENTICAL); node.setFn(fn); node.setType(fn.getReturnType()); return null; } @Override public Void visitCaseWhenExpr(CaseExpr node, Scope context) { int start = 0; int end = node.getChildren().size(); Expr caseExpr = null; Expr elseExpr = null; if (node.hasCaseExpr()) { caseExpr = node.getChild(0); start++; } if (node.hasElseExpr()) { elseExpr = node.getChild(end - 1); end--; } if (node.getChildren().stream().anyMatch(d -> !d.getType().isScalarType())) { throw new SemanticException("case-when only support scalar type", node.getPos()); } List<Type> whenTypes = Lists.newArrayList(); if (null != caseExpr) { whenTypes.add(caseExpr.getType()); } for (int i = start; i < end; i = i + 2) { whenTypes.add(node.getChild(i).getType()); } Type compatibleType = Type.NULL; if (null != caseExpr) { compatibleType = TypeManager.getCompatibleTypeForCaseWhen(whenTypes); } for (Type type : whenTypes) { if (!Type.canCastTo(type, compatibleType)) { throw new SemanticException("Invalid when type cast " + type.toSql() + " to " + compatibleType.toSql(), node.getPos()); } } List<Type> thenTypes = Lists.newArrayList(); for (int i = start + 1; i < end; i = i + 2) { 
thenTypes.add(node.getChild(i).getType()); } if (null != elseExpr) { thenTypes.add(elseExpr.getType()); } Type returnType = thenTypes.stream().allMatch(Type.NULL::equals) ? Type.BOOLEAN : TypeManager.getCompatibleTypeForCaseWhen(thenTypes); for (Type type : thenTypes) { if (!Type.canCastTo(type, returnType)) { throw new SemanticException("Invalid then type cast " + type.toSql() + " to " + returnType.toSql(), node.getPos()); } } node.setType(returnType); return null; } @Override public Void visitSubquery(Subquery node, Scope context) { QueryAnalyzer queryAnalyzer = new QueryAnalyzer(session); queryAnalyzer.analyze(node.getQueryStatement(), context); node.setType(node.getQueryStatement().getQueryRelation().getRelationFields().getFieldByIndex(0).getType()); return null; } @Override public Void visitAnalyticExpr(AnalyticExpr node, Scope context) { visit(node.getFnCall(), context); node.setType(node.getFnCall().getType()); if (node.getWindow() != null) { if (node.getWindow().getLeftBoundary() != null && node.getWindow().getLeftBoundary().getExpr() != null) { visit(node.getWindow().getLeftBoundary().getExpr(), context); } if (node.getWindow().getRightBoundary() != null && node.getWindow().getRightBoundary().getExpr() != null) { visit(node.getWindow().getRightBoundary().getExpr(), context); } } node.getPartitionExprs().forEach(e -> visit(e, context)); node.getOrderByElements().stream().map(OrderByElement::getExpr).forEach(e -> visit(e, context)); verifyAnalyticExpression(node); return null; } @Override public Void visitInformationFunction(InformationFunction node, Scope context) { String funcType = node.getFuncType(); if (funcType.equalsIgnoreCase("DATABASE") || funcType.equalsIgnoreCase("SCHEMA")) { node.setType(Type.VARCHAR); node.setStrValue(ClusterNamespace.getNameFromFullName(session.getDatabase())); } else if (funcType.equalsIgnoreCase("USER")) { node.setType(Type.VARCHAR); String user = session.getQualifiedUser(); String remoteIP = session.getRemoteIP(); 
node.setStrValue(new UserIdentity(user, remoteIP).toString()); } else if (funcType.equalsIgnoreCase("CURRENT_USER")) { node.setType(Type.VARCHAR); node.setStrValue(session.getCurrentUserIdentity().toString()); } else if (funcType.equalsIgnoreCase("CURRENT_ROLE")) { node.setType(Type.VARCHAR); AuthorizationManager manager = session.getGlobalStateMgr().getAuthorizationManager(); List<String> roleName = new ArrayList<>(); try { for (Long roleId : session.getCurrentRoleIds()) { RolePrivilegeCollection rolePrivilegeCollection = manager.getRolePrivilegeCollectionUnlocked(roleId, false); if (rolePrivilegeCollection != null) { roleName.add(rolePrivilegeCollection.getName()); } } } catch (PrivilegeException e) { throw new SemanticException(e.getMessage()); } if (roleName.isEmpty()) { node.setStrValue("NONE"); } else { node.setStrValue(Joiner.on(", ").join(roleName)); } } else if (funcType.equalsIgnoreCase("CONNECTION_ID")) { node.setType(Type.BIGINT); node.setIntValue(session.getConnectionId()); node.setStrValue(""); } else if (funcType.equalsIgnoreCase("CURRENT_CATALOG")) { node.setType(Type.VARCHAR); node.setStrValue(session.getCurrentCatalog().toString()); } return null; } @Override public Void visitVariableExpr(VariableExpr node, Scope context) { try { if (node.getSetType().equals(SetType.USER)) { UserVariable userVariable = session.getUserVariables(node.getName()); if (userVariable == null) { node.setType(Type.STRING); node.setIsNull(); return null; } Type variableType = userVariable.getEvaluatedExpression().getType(); node.setType(variableType); if (userVariable.getEvaluatedExpression() instanceof NullLiteral) { node.setIsNull(); } else { node.setValue(userVariable.getEvaluatedExpression().getRealObjectValue()); } } else { VariableMgr.fillValue(session.getSessionVariable(), node); if (!Strings.isNullOrEmpty(node.getName()) && node.getName().equalsIgnoreCase(SessionVariable.SQL_MODE)) { node.setType(Type.VARCHAR); node.setValue(SqlModeHelper.decode((long) 
node.getValue())); } } } catch (AnalysisException | DdlException e) { throw new SemanticException(e.getMessage()); } return null; } @Override public Void visitDefaultValueExpr(DefaultValueExpr node, Scope context) { node.setType(Type.VARCHAR); return null; } @Override public Void visitCloneExpr(CloneExpr node, Scope context) { return null; } }
Updated: the redundant `isColocate` local variable has been removed; the `enableColocateRestore` field is now read directly.
private void checkAndPrepareMeta() { MetricRepo.COUNTER_UNFINISHED_RESTORE_JOB.increase(1L); Database db = globalStateMgr.getDb(dbId); if (db == null) { status = new Status(ErrCode.NOT_FOUND, "database " + dbId + " does not exist"); return; } jobId = globalStateMgr.getNextId(); if (backupMeta == null) { status = new Status(ErrCode.COMMON_ERROR, "Failed to read backup meta from file"); return; } Locker locker = new Locker(); locker.lockDatabase(db, LockType.WRITE); try { for (BackupTableInfo tblInfo : jobInfo.tables.values()) { Table tbl = db.getTable(jobInfo.getAliasByOriginNameIfSet(tblInfo.name)); if (tbl == null) { continue; } if (!tbl.isNativeTableOrMaterializedView()) { status = new Status(ErrCode.COMMON_ERROR, "Only support restore OLAP table: " + tbl.getName()); return; } OlapTable olapTbl = (OlapTable) tbl; if (olapTbl.getState() != OlapTableState.NORMAL) { status = new Status(ErrCode.COMMON_ERROR, "Table " + tbl.getName() + "'s state is not NORMAL: " + olapTbl.getState().name()); return; } if (olapTbl.existTempPartitions()) { status = new Status(ErrCode.COMMON_ERROR, "Do not support restoring table with temp partitions"); return; } olapTbl.setState(OlapTableState.RESTORE); } } finally { locker.unLockDatabase(db, LockType.WRITE); } batchTask = new AgentBatchTask(); locker.lockDatabase(db, LockType.READ); try { for (BackupTableInfo tblInfo : jobInfo.tables.values()) { Table remoteTbl = backupMeta.getTable(tblInfo.name); Preconditions.checkNotNull(remoteTbl); Table localTbl = db.getTable(jobInfo.getAliasByOriginNameIfSet(tblInfo.name)); if (localTbl != null) { if (localTbl instanceof OlapTable && localTbl.hasAutoIncrementColumn()) { ((OlapTable) localTbl).sendDropAutoIncrementMapTask(); } if (localTbl.isMaterializedView()) { MaterializedView mv = (MaterializedView) localTbl; if (mv.isActive()) { LOG.warn("Skip to restore existed and active mv: {}", mv.getName()); skipRestoreRemoteTableIds.add(remoteTbl.getId()); continue; } } 
tblInfo.checkAndRecoverAutoIncrementId(localTbl); if (!localTbl.isNativeTableOrMaterializedView()) { status = new Status(ErrCode.COMMON_ERROR, "Only support retore olap table: " + localTbl.getName()); return; } OlapTable localOlapTbl = (OlapTable) localTbl; OlapTable remoteOlapTbl = (OlapTable) remoteTbl; remoteOlapTbl.setColocateGroup(null); List<String> intersectPartNames = Lists.newArrayList(); Status st = localOlapTbl.getIntersectPartNamesWith(remoteOlapTbl, intersectPartNames); if (!st.ok()) { status = st; return; } LOG.debug("get intersect part names: {}, job: {}", intersectPartNames, this); if (localOlapTbl.getSignature(BackupHandler.SIGNATURE_VERSION, intersectPartNames, true) != remoteOlapTbl.getSignature(BackupHandler.SIGNATURE_VERSION, intersectPartNames, true) || localOlapTbl.getBaseSchema().size() != remoteOlapTbl.getBaseSchema().size()) { List<Pair<Integer, String>> localCheckSumList = localOlapTbl.getSignatureSequence( BackupHandler.SIGNATURE_VERSION, intersectPartNames); List<Pair<Integer, String>> remoteCheckSumList = remoteOlapTbl.getSignatureSequence( BackupHandler.SIGNATURE_VERSION, intersectPartNames); String errMsg = ""; if (localCheckSumList.size() == remoteCheckSumList.size()) { for (int i = 0; i < localCheckSumList.size(); ++i) { int localCheckSum = ((Integer) localCheckSumList.get(i).first).intValue(); int remoteCheckSum = ((Integer) remoteCheckSumList.get(i).first).intValue(); if (localCheckSum != remoteCheckSum) { errMsg = ((String) localCheckSumList.get(i).second); break; } } } status = new Status(ErrCode.COMMON_ERROR, "Table " + jobInfo.getAliasByOriginNameIfSet(tblInfo.name) + " already exist but with different schema, errMsg: " + errMsg); return; } if (localOlapTbl.getKeysType() != remoteOlapTbl.getKeysType()) { status = new Status(ErrCode.COMMON_ERROR, "Table " + jobInfo.getAliasByOriginNameIfSet(tblInfo.name) + " already exist but with different key type"); return; } for (int i = 0; i < localOlapTbl.getBaseSchema().size(); ++i) { 
Column localColumn = localOlapTbl.getBaseSchema().get(i); Column remoteColumn = remoteOlapTbl.getBaseSchema().get(i); if (!localColumn.equals(remoteColumn)) { status = new Status(ErrCode.COMMON_ERROR, "Table " + jobInfo.getAliasByOriginNameIfSet(tblInfo.name) + " already exist but with different schema, column: " + localColumn.getName() + " in existed table is different from the column in backup snapshot, column: " + remoteColumn.getName()); return; } } for (BackupPartitionInfo backupPartInfo : tblInfo.partitions.values()) { Partition localPartition = localOlapTbl.getPartition(backupPartInfo.name); if (localPartition != null) { if (!backupPartInfo.subPartitions.isEmpty()) { status = new Status(ErrCode.COMMON_ERROR, "Partition " + backupPartInfo.name + " in table " + localTbl.getName() + " is automatic bucket partition, can not be restored"); return; } PartitionInfo localPartInfo = localOlapTbl.getPartitionInfo(); if (localPartInfo.isRangePartition()) { RangePartitionInfo localRangePartInfo = (RangePartitionInfo) localPartInfo; RangePartitionInfo remoteRangePartInfo = (RangePartitionInfo) remoteOlapTbl.getPartitionInfo(); Range<PartitionKey> localRange = localRangePartInfo.getRange(localPartition.getId()); Range<PartitionKey> remoteRange = remoteRangePartInfo.getRange(backupPartInfo.id); if (localRange.equals(remoteRange)) { if (genFileMappingWhenBackupReplicasEqual(localPartInfo, localPartition, localTbl, backupPartInfo, tblInfo)) { return; } } else { status = new Status(ErrCode.COMMON_ERROR, "Partition " + backupPartInfo.name + " in table " + localTbl.getName() + " has different range with partition in repository"); return; } } else { if (genFileMappingWhenBackupReplicasEqual(localPartInfo, localPartition, localTbl, backupPartInfo, tblInfo)) { return; } } } else { PartitionInfo localPartitionInfo = localOlapTbl.getPartitionInfo(); if (localPartitionInfo.isRangePartition()) { RangePartitionInfo localRangePartitionInfo = (RangePartitionInfo) localPartitionInfo; 
RangePartitionInfo remoteRangePartitionInfo = (RangePartitionInfo) remoteOlapTbl.getPartitionInfo(); Range<PartitionKey> remoteRange = remoteRangePartitionInfo.getRange(backupPartInfo.id); if (localRangePartitionInfo.getAnyIntersectRange(remoteRange, false) != null) { status = new Status(ErrCode.COMMON_ERROR, "Partition " + backupPartInfo.name + " in table " + localTbl.getName() + " has conflict range with existing ranges"); return; } else { Partition restorePart = resetPartitionForRestore(localOlapTbl, remoteOlapTbl, backupPartInfo.name, restoreReplicationNum); if (restorePart == null) { status = new Status(ErrCode.COMMON_ERROR, "Restored partition " + backupPartInfo.name + "of " + localTbl.getName() + "is null"); return; } restoredPartitions.add(Pair.create(localOlapTbl.getName(), restorePart)); } } else { status = new Status(ErrCode.COMMON_ERROR, "No partition exist in single partitioned table " + localOlapTbl.getName()); return; } } } } else { OlapTable remoteOlapTbl = (OlapTable) remoteTbl; boolean isColocate = enableColocateRestore; Set<String> allPartNames = remoteOlapTbl.getPartitionNames(); for (String partName : allPartNames) { if (!tblInfo.containsPart(partName)) { remoteOlapTbl.dropPartitionAndReserveTablet(partName); } } if (!isColocate) { remoteOlapTbl.setColocateGroup(null); } Status st = resetTableForRestore(remoteOlapTbl, db); if (!st.ok()) { status = st; return; } ColocateTableIndex colocateTableIndex = GlobalStateMgr.getCurrentState().getColocateTableIndex(); if (isColocate && colocateTableIndex.isColocateTable(remoteOlapTbl.getId())) { ColocateTableIndex.GroupId groupId = colocateTableIndex.getGroup(remoteOlapTbl.getId()); List<List<Long>> backendsPerBucketSeq = colocateTableIndex.getBackendsPerBucketSeq(groupId); ColocatePersistInfo colocatePersistInfo = ColocatePersistInfo .createForAddTable(groupId, remoteOlapTbl.getId(), backendsPerBucketSeq); colocatePersistInfos.add(colocatePersistInfo); } tblInfo.checkAndRecoverAutoIncrementId((Table) 
remoteOlapTbl); remoteOlapTbl.setState(OlapTableState.RESTORE); LOG.debug("put remote table {} to restoredTbls", remoteOlapTbl.getName()); restoredTbls.add(remoteOlapTbl); } } LOG.debug("finished to prepare restored partitions and tables. {}", this); for (Pair<String, Partition> entry : restoredPartitions) { OlapTable localTbl = (OlapTable) db.getTable(entry.first); Preconditions.checkNotNull(localTbl, localTbl.getName()); Partition restorePart = entry.second; OlapTable remoteTbl = (OlapTable) backupMeta.getTable(entry.first); BackupPartitionInfo backupPartitionInfo = jobInfo.getTableInfo(entry.first).getPartInfo(restorePart.getName()); createReplicas(localTbl, restorePart); genFileMapping(localTbl, restorePart, remoteTbl.getId(), backupPartitionInfo, true); } for (Table restoreTbl : restoredTbls) { for (Partition restorePart : restoreTbl.getPartitions()) { createReplicas((OlapTable) restoreTbl, restorePart); BackupTableInfo backupTableInfo = jobInfo.getTableInfo(restoreTbl.getName()); genFileMapping((OlapTable) restoreTbl, restorePart, backupTableInfo.id, backupTableInfo.getPartInfo(restorePart.getName()), true); } ((OlapTable) restoreTbl).setName(jobInfo.getAliasByOriginNameIfSet(restoreTbl.getName())); } LOG.debug("finished to generate create replica tasks. {}", this); } finally { locker.unLockDatabase(db, LockType.READ); } sendCreateReplicaTasks(); if (!status.ok()) { return; } addRestorePartitionsAndTables(db); if (!status.ok()) { return; } LOG.info("finished to prepare meta. begin to make snapshot. {}", this); prepareAndSendSnapshotTasks(db); if (!status.ok()) { return; } metaPreparedTime = System.currentTimeMillis(); state = RestoreJobState.SNAPSHOTING; LOG.info("finished to prepare meta and send snapshot tasks, num: {}. {}", batchTask.getTaskNum(), this); }
// NOTE(review): copies the job-level colocate-restore flag (initialized from
// Config.enable_colocate_restore on the enclosing RestoreJob) into a local.
// The local mirrors the field with no added logic, so it is a candidate for removal.
boolean isColocate = enableColocateRestore;
/**
 * PENDING-state step of the restore job: validates the backup meta against the local
 * database and prepares all metadata needed before snapshotting.
 * <p>
 * Outline:
 * <ol>
 *   <li>Mark every locally-existing restore target table as RESTORE (db WRITE lock).</li>
 *   <li>Under the db READ lock, for each backed-up table:
 *       if it exists locally, verify schema/key-type/partition compatibility and build the
 *       file mapping (new partitions are reset and queued in {@code restoredPartitions});
 *       if it does not exist, reset the remote table's ids for restore and queue it in
 *       {@code restoredTbls}. Create-replica tasks are generated for all new objects.</li>
 *   <li>Outside the lock: send create-replica tasks, register the restored partitions and
 *       tables in the catalog, then send snapshot tasks and move to SNAPSHOTING.</li>
 * </ol>
 * Any failure is reported through the {@code status} field and the method returns early;
 * the caller cancels the job when {@code status} is not OK.
 */
private void checkAndPrepareMeta() {
    MetricRepo.COUNTER_UNFINISHED_RESTORE_JOB.increase(1L);
    Database db = globalStateMgr.getDb(dbId);
    if (db == null) {
        status = new Status(ErrCode.NOT_FOUND, "database " + dbId + " does not exist");
        return;
    }
    jobId = globalStateMgr.getNextId();
    if (backupMeta == null) {
        status = new Status(ErrCode.COMMON_ERROR, "Failed to read backup meta from file");
        return;
    }

    // Phase 1: flip every existing target table to RESTORE state so concurrent DDL/load
    // is rejected while the job runs. Requires the WRITE lock.
    Locker locker = new Locker();
    locker.lockDatabase(db, LockType.WRITE);
    try {
        for (BackupTableInfo tblInfo : jobInfo.tables.values()) {
            Table tbl = db.getTable(jobInfo.getAliasByOriginNameIfSet(tblInfo.name));
            if (tbl == null) {
                // Table does not exist locally yet; it will be created in phase 2.
                continue;
            }
            if (!tbl.isNativeTableOrMaterializedView()) {
                status = new Status(ErrCode.COMMON_ERROR, "Only support restore OLAP table: " + tbl.getName());
                return;
            }
            OlapTable olapTbl = (OlapTable) tbl;
            if (olapTbl.getState() != OlapTableState.NORMAL) {
                status = new Status(ErrCode.COMMON_ERROR,
                        "Table " + tbl.getName() + "'s state is not NORMAL: " + olapTbl.getState().name());
                return;
            }
            if (olapTbl.existTempPartitions()) {
                status = new Status(ErrCode.COMMON_ERROR, "Do not support restoring table with temp partitions");
                return;
            }
            olapTbl.setState(OlapTableState.RESTORE);
        }
    } finally {
        locker.unLockDatabase(db, LockType.WRITE);
    }

    // Phase 2: compatibility checks, id reset and file-mapping / create-replica task
    // generation. Only reads catalog structure, so the READ lock suffices.
    batchTask = new AgentBatchTask();
    locker.lockDatabase(db, LockType.READ);
    try {
        for (BackupTableInfo tblInfo : jobInfo.tables.values()) {
            Table remoteTbl = backupMeta.getTable(tblInfo.name);
            Preconditions.checkNotNull(remoteTbl);
            Table localTbl = db.getTable(jobInfo.getAliasByOriginNameIfSet(tblInfo.name));
            if (localTbl != null) {
                // Case A: table already exists locally. Verify it matches the snapshot.
                if (localTbl instanceof OlapTable && localTbl.hasAutoIncrementColumn()) {
                    ((OlapTable) localTbl).sendDropAutoIncrementMapTask();
                }
                if (localTbl.isMaterializedView()) {
                    MaterializedView mv = (MaterializedView) localTbl;
                    if (mv.isActive()) {
                        // An active MV is kept as-is; remember its remote id so later
                        // stages skip it instead of failing.
                        LOG.warn("Skip to restore existed and active mv: {}", mv.getName());
                        skipRestoreRemoteTableIds.add(remoteTbl.getId());
                        continue;
                    }
                }
                tblInfo.checkAndRecoverAutoIncrementId(localTbl);
                if (!localTbl.isNativeTableOrMaterializedView()) {
                    // FIX: message typo "retore olap table" corrected to match the
                    // identical check in phase 1.
                    status = new Status(ErrCode.COMMON_ERROR,
                            "Only support restore OLAP table: " + localTbl.getName());
                    return;
                }
                OlapTable localOlapTbl = (OlapTable) localTbl;
                OlapTable remoteOlapTbl = (OlapTable) remoteTbl;
                // The colocate group of the snapshot is irrelevant when restoring into
                // an existing table; the local table's grouping wins.
                remoteOlapTbl.setColocateGroup(null);
                List<String> intersectPartNames = Lists.newArrayList();
                Status st = localOlapTbl.getIntersectPartNamesWith(remoteOlapTbl, intersectPartNames);
                if (!st.ok()) {
                    status = st;
                    return;
                }
                LOG.debug("get intersect part names: {}, job: {}", intersectPartNames, this);
                // Schema signatures over the shared partitions must match exactly.
                if (localOlapTbl.getSignature(BackupHandler.SIGNATURE_VERSION, intersectPartNames, true)
                        != remoteOlapTbl.getSignature(BackupHandler.SIGNATURE_VERSION, intersectPartNames, true)
                        || localOlapTbl.getBaseSchema().size() != remoteOlapTbl.getBaseSchema().size()) {
                    // Walk the per-item signature sequences to name the first mismatching
                    // component in the error message.
                    List<Pair<Integer, String>> localCheckSumList = localOlapTbl.getSignatureSequence(
                            BackupHandler.SIGNATURE_VERSION, intersectPartNames);
                    List<Pair<Integer, String>> remoteCheckSumList = remoteOlapTbl.getSignatureSequence(
                            BackupHandler.SIGNATURE_VERSION, intersectPartNames);
                    String errMsg = "";
                    if (localCheckSumList.size() == remoteCheckSumList.size()) {
                        for (int i = 0; i < localCheckSumList.size(); ++i) {
                            int localCheckSum = ((Integer) localCheckSumList.get(i).first).intValue();
                            int remoteCheckSum = ((Integer) remoteCheckSumList.get(i).first).intValue();
                            if (localCheckSum != remoteCheckSum) {
                                errMsg = ((String) localCheckSumList.get(i).second);
                                break;
                            }
                        }
                    }
                    status = new Status(ErrCode.COMMON_ERROR,
                            "Table " + jobInfo.getAliasByOriginNameIfSet(tblInfo.name)
                                    + " already exist but with different schema, errMsg: " + errMsg);
                    return;
                }
                if (localOlapTbl.getKeysType() != remoteOlapTbl.getKeysType()) {
                    status = new Status(ErrCode.COMMON_ERROR,
                            "Table " + jobInfo.getAliasByOriginNameIfSet(tblInfo.name)
                                    + " already exist but with different key type");
                    return;
                }
                // Column-by-column comparison (sizes already verified equal above).
                for (int i = 0; i < localOlapTbl.getBaseSchema().size(); ++i) {
                    Column localColumn = localOlapTbl.getBaseSchema().get(i);
                    Column remoteColumn = remoteOlapTbl.getBaseSchema().get(i);
                    if (!localColumn.equals(remoteColumn)) {
                        status = new Status(ErrCode.COMMON_ERROR,
                                "Table " + jobInfo.getAliasByOriginNameIfSet(tblInfo.name)
                                        + " already exist but with different schema, column: " + localColumn.getName()
                                        + " in existed table is different from the column in backup snapshot, column: "
                                        + remoteColumn.getName());
                        return;
                    }
                }
                for (BackupPartitionInfo backupPartInfo : tblInfo.partitions.values()) {
                    Partition localPartition = localOlapTbl.getPartition(backupPartInfo.name);
                    if (localPartition != null) {
                        // A1: partition exists locally — ranges must match, then only a
                        // file mapping is needed.
                        if (!backupPartInfo.subPartitions.isEmpty()) {
                            status = new Status(ErrCode.COMMON_ERROR,
                                    "Partition " + backupPartInfo.name + " in table " + localTbl.getName()
                                            + " is automatic bucket partition, can not be restored");
                            return;
                        }
                        PartitionInfo localPartInfo = localOlapTbl.getPartitionInfo();
                        if (localPartInfo.isRangePartition()) {
                            RangePartitionInfo localRangePartInfo = (RangePartitionInfo) localPartInfo;
                            RangePartitionInfo remoteRangePartInfo =
                                    (RangePartitionInfo) remoteOlapTbl.getPartitionInfo();
                            Range<PartitionKey> localRange = localRangePartInfo.getRange(localPartition.getId());
                            Range<PartitionKey> remoteRange = remoteRangePartInfo.getRange(backupPartInfo.id);
                            if (localRange.equals(remoteRange)) {
                                if (genFileMappingWhenBackupReplicasEqual(localPartInfo, localPartition, localTbl,
                                        backupPartInfo, tblInfo)) {
                                    return;
                                }
                            } else {
                                status = new Status(ErrCode.COMMON_ERROR,
                                        "Partition " + backupPartInfo.name + " in table " + localTbl.getName()
                                                + " has different range with partition in repository");
                                return;
                            }
                        } else {
                            if (genFileMappingWhenBackupReplicasEqual(localPartInfo, localPartition, localTbl,
                                    backupPartInfo, tblInfo)) {
                                return;
                            }
                        }
                    } else {
                        // A2: partition missing locally — must not intersect existing
                        // ranges; reset its ids and queue it for creation.
                        PartitionInfo localPartitionInfo = localOlapTbl.getPartitionInfo();
                        if (localPartitionInfo.isRangePartition()) {
                            RangePartitionInfo localRangePartitionInfo = (RangePartitionInfo) localPartitionInfo;
                            RangePartitionInfo remoteRangePartitionInfo =
                                    (RangePartitionInfo) remoteOlapTbl.getPartitionInfo();
                            Range<PartitionKey> remoteRange = remoteRangePartitionInfo.getRange(backupPartInfo.id);
                            if (localRangePartitionInfo.getAnyIntersectRange(remoteRange, false) != null) {
                                status = new Status(ErrCode.COMMON_ERROR,
                                        "Partition " + backupPartInfo.name + " in table " + localTbl.getName()
                                                + " has conflict range with existing ranges");
                                return;
                            } else {
                                Partition restorePart = resetPartitionForRestore(localOlapTbl, remoteOlapTbl,
                                        backupPartInfo.name, restoreReplicationNum);
                                if (restorePart == null) {
                                    // FIX: added the missing spaces around "of"/"is null"
                                    // so the message reads "partition p of t is null".
                                    status = new Status(ErrCode.COMMON_ERROR,
                                            "Restored partition " + backupPartInfo.name + " of "
                                                    + localTbl.getName() + " is null");
                                    return;
                                }
                                restoredPartitions.add(Pair.create(localOlapTbl.getName(), restorePart));
                            }
                        } else {
                            status = new Status(ErrCode.COMMON_ERROR,
                                    "No partition exist in single partitioned table " + localOlapTbl.getName());
                            return;
                        }
                    }
                }
            } else {
                // Case B: table does not exist locally — restore the whole remote table.
                OlapTable remoteOlapTbl = (OlapTable) remoteTbl;
                // Drop remote partitions that were not part of this backup selection.
                Set<String> allPartNames = remoteOlapTbl.getPartitionNames();
                for (String partName : allPartNames) {
                    if (!tblInfo.containsPart(partName)) {
                        remoteOlapTbl.dropPartitionAndReserveTablet(partName);
                    }
                }
                if (!enableColocateRestore) {
                    remoteOlapTbl.setColocateGroup(null);
                }
                Status st = resetTableForRestore(remoteOlapTbl, db);
                if (!st.ok()) {
                    status = st;
                    return;
                }
                // Persist colocate membership so a FE restart replays it correctly.
                ColocateTableIndex colocateTableIndex = GlobalStateMgr.getCurrentState().getColocateTableIndex();
                if (enableColocateRestore && colocateTableIndex.isColocateTable(remoteOlapTbl.getId())) {
                    ColocateTableIndex.GroupId groupId = colocateTableIndex.getGroup(remoteOlapTbl.getId());
                    List<List<Long>> backendsPerBucketSeq = colocateTableIndex.getBackendsPerBucketSeq(groupId);
                    ColocatePersistInfo colocatePersistInfo = ColocatePersistInfo
                            .createForAddTable(groupId, remoteOlapTbl.getId(), backendsPerBucketSeq);
                    colocatePersistInfos.add(colocatePersistInfo);
                }
                // FIX: dropped the redundant (Table) cast — the upcast is implicit.
                tblInfo.checkAndRecoverAutoIncrementId(remoteOlapTbl);
                remoteOlapTbl.setState(OlapTableState.RESTORE);
                LOG.debug("put remote table {} to restoredTbls", remoteOlapTbl.getName());
                restoredTbls.add(remoteOlapTbl);
            }
        }

        LOG.debug("finished to prepare restored partitions and tables. {}", this);
        // Generate create-replica tasks and file mappings for new partitions of
        // existing tables...
        for (Pair<String, Partition> entry : restoredPartitions) {
            OlapTable localTbl = (OlapTable) db.getTable(entry.first);
            // FIX: use entry.first as the failure message — the previous
            // localTbl.getName() would itself NPE before checkNotNull could report.
            Preconditions.checkNotNull(localTbl, entry.first);
            Partition restorePart = entry.second;
            OlapTable remoteTbl = (OlapTable) backupMeta.getTable(entry.first);
            BackupPartitionInfo backupPartitionInfo =
                    jobInfo.getTableInfo(entry.first).getPartInfo(restorePart.getName());
            createReplicas(localTbl, restorePart);
            genFileMapping(localTbl, restorePart, remoteTbl.getId(), backupPartitionInfo, true);
        }
        // ...and for all partitions of entirely new tables. The alias rename happens
        // last so lookups by original name above still succeed.
        for (Table restoreTbl : restoredTbls) {
            for (Partition restorePart : restoreTbl.getPartitions()) {
                createReplicas((OlapTable) restoreTbl, restorePart);
                BackupTableInfo backupTableInfo = jobInfo.getTableInfo(restoreTbl.getName());
                genFileMapping((OlapTable) restoreTbl, restorePart, backupTableInfo.id,
                        backupTableInfo.getPartInfo(restorePart.getName()), true);
            }
            ((OlapTable) restoreTbl).setName(jobInfo.getAliasByOriginNameIfSet(restoreTbl.getName()));
        }
        LOG.debug("finished to generate create replica tasks. {}", this);
    } finally {
        locker.unLockDatabase(db, LockType.READ);
    }

    // Phase 3 (no db lock held): create replicas on BEs, register the new objects in the
    // catalog, then kick off snapshot tasks. Each step reports failure via `status`.
    sendCreateReplicaTasks();
    if (!status.ok()) {
        return;
    }

    addRestorePartitionsAndTables(db);
    if (!status.ok()) {
        return;
    }

    LOG.info("finished to prepare meta. begin to make snapshot. {}", this);

    prepareAndSendSnapshotTasks(db);
    if (!status.ok()) {
        return;
    }

    metaPreparedTime = System.currentTimeMillis();
    state = RestoreJobState.SNAPSHOTING;
    LOG.info("finished to prepare meta and send snapshot tasks, num: {}. {}",
            batchTask.getTaskNum(), this);
}
class RestoreJob extends AbstractJob { private static final Logger LOG = LogManager.getLogger(RestoreJob.class); public enum RestoreJobState { PENDING, SNAPSHOTING, DOWNLOAD, DOWNLOADING, COMMIT, COMMITTING, FINISHED, CANCELLED } @SerializedName(value = "backupTimestamp") private String backupTimestamp; @SerializedName(value = "jobInfo") protected BackupJobInfo jobInfo; @SerializedName(value = "allowLoad") private boolean allowLoad; @SerializedName(value = "state") private RestoreJobState state; @SerializedName(value = "backupMeta") protected BackupMeta backupMeta; @SerializedName(value = "fileMapping") protected RestoreFileMapping fileMapping = new RestoreFileMapping(); @SerializedName(value = "metaPreparedTime") private long metaPreparedTime = -1; @SerializedName(value = "snapshotFinishedTime") private long snapshotFinishedTime = -1; @SerializedName(value = "downloadFinishedTime") private long downloadFinishedTime = -1; @SerializedName(value = "restoreReplicationNum") protected int restoreReplicationNum; @SerializedName(value = "restoredPartitions") protected List<Pair<String, Partition>> restoredPartitions = Lists.newArrayList(); @SerializedName(value = "restoredTbls") private List<Table> restoredTbls = Lists.newArrayList(); @SerializedName(value = "restoredVersionInfo") private com.google.common.collect.Table<Long, Long, Long> restoredVersionInfo = HashBasedTable.create(); @SerializedName(value = "snapshotInfos") protected com.google.common.collect.Table<Long, Long, SnapshotInfo> snapshotInfos = HashBasedTable.create(); @SerializedName(value = "colocatePersistInfos") private List<ColocatePersistInfo> colocatePersistInfos = Lists.newArrayList(); protected Map<Long, Long> unfinishedSignatureToId = Maps.newConcurrentMap(); protected Set<Long> skipRestoreRemoteTableIds = Sets.newHashSet(); private MvRestoreContext mvRestoreContext; private AgentBatchTask batchTask; boolean enableColocateRestore = Config.enable_colocate_restore; public RestoreJob() { 
super(JobType.RESTORE); } public RestoreJob(String label, String backupTs, long dbId, String dbName, BackupJobInfo jobInfo, boolean allowLoad, int restoreReplicationNum, long timeoutMs, GlobalStateMgr globalStateMgr, long repoId, BackupMeta backupMeta, MvRestoreContext mvRestoreContext) { super(JobType.RESTORE, label, dbId, dbName, timeoutMs, globalStateMgr, repoId); this.backupTimestamp = backupTs; this.jobInfo = jobInfo; this.allowLoad = allowLoad; this.restoreReplicationNum = restoreReplicationNum; this.state = RestoreJobState.PENDING; this.backupMeta = backupMeta; this.mvRestoreContext = mvRestoreContext; } public RestoreJobState getState() { return state; } public RestoreFileMapping getFileMapping() { return fileMapping; } public BackupJobInfo getJobInfo() { return jobInfo; } public BackupMeta getBackupMeta() { return backupMeta; } public synchronized boolean finishTabletSnapshotTask(SnapshotTask task, TFinishTaskRequest request) { if (checkTaskStatus(task, task.getJobId(), request)) { return false; } Preconditions.checkState(request.isSetSnapshot_path()); SnapshotInfo info = new SnapshotInfo(task.getDbId(), task.getTableId(), task.getPartitionId(), task.getIndexId(), task.getTabletId(), task.getBackendId(), task.getSchemaHash(), request.getSnapshot_path(), Lists.newArrayList()); snapshotInfos.put(task.getTabletId(), task.getBackendId(), info); taskProgress.remove(task.getSignature()); Long removedTabletId = unfinishedSignatureToId.remove(task.getSignature()); if (removedTabletId != null) { taskErrMsg.remove(task.getSignature()); Preconditions.checkState(task.getTabletId() == removedTabletId, removedTabletId); LOG.debug("get finished snapshot info: {}, unfinished tasks num: {}, remove result: {}. 
{}", info, unfinishedSignatureToId.size(), this, removedTabletId); return true; } return false; } public synchronized boolean finishTabletDownloadTask(DownloadTask task, TFinishTaskRequest request) { if (checkTaskStatus(task, task.getJobId(), request)) { return false; } Preconditions.checkState(request.isSetDownloaded_tablet_ids()); for (Long tabletId : request.getDownloaded_tablet_ids()) { SnapshotInfo info = snapshotInfos.get(tabletId, task.getBackendId()); if (info == null) { LOG.error("failed to find snapshot infos of tablet {} in be {}, {}", tabletId, task.getBackendId(), this); return false; } } taskProgress.remove(task.getSignature()); Long beId = unfinishedSignatureToId.remove(task.getSignature()); if (beId == null || beId != task.getBackendId()) { LOG.error("invalid download task: {}. {}", task, this); return false; } taskErrMsg.remove(task.getSignature()); return true; } public synchronized boolean finishDirMoveTask(DirMoveTask task, TFinishTaskRequest request) { if (checkTaskStatus(task, task.getJobId(), request)) { return false; } taskProgress.remove(task.getSignature()); Long tabletId = unfinishedSignatureToId.remove(task.getSignature()); if (tabletId == null || tabletId != task.getTabletId()) { LOG.error("invalid dir move task: {}. 
{}", task, this); return false; } taskErrMsg.remove(task.getSignature()); return true; } private boolean checkTaskStatus(AgentTask task, long jobId, TFinishTaskRequest request) { Preconditions.checkState(jobId == this.jobId); Preconditions.checkState(dbId == task.getDbId()); if (request.getTask_status().getStatus_code() != TStatusCode.OK) { taskErrMsg.put(task.getSignature(), Joiner.on(",").join(request.getTask_status().getError_msgs())); return true; } return false; } @Override public synchronized void replayRun() { LOG.info("replay run restore job: {}", this); switch (state) { case DOWNLOAD: replayCheckAndPrepareMeta(); break; case FINISHED: replayWaitingAllTabletsCommitted(); break; default: break; } } @Override public synchronized void replayCancel() { cancelInternal(true /* is replay */); } @Override public boolean isPending() { return state == RestoreJobState.PENDING; } @Override public boolean isCancelled() { return state == RestoreJobState.CANCELLED; } public void setRepo(Repository repo) { this.repo = repo; } @Override public void run() { if (state == RestoreJobState.FINISHED || state == RestoreJobState.CANCELLED) { return; } if (System.currentTimeMillis() - createTime > timeoutMs) { status = new Status(ErrCode.TIMEOUT, "restore job with label: " + label + " timeout."); cancelInternal(false); return; } if (repo == null) { repo = globalStateMgr.getBackupHandler().getRepoMgr().getRepo(repoId); if (repo == null) { status = new Status(ErrCode.COMMON_ERROR, "failed to get repository: " + repoId); cancelInternal(false); return; } } LOG.info("run restore job: {}", this); checkIfNeedCancel(); if (status.ok()) { switch (state) { case PENDING: checkAndPrepareMeta(); break; case SNAPSHOTING: waitingAllSnapshotsFinished(); break; case DOWNLOAD: downloadSnapshots(); break; case DOWNLOADING: waitingAllDownloadFinished(); break; case COMMIT: commit(); break; case COMMITTING: waitingAllTabletsCommitted(); break; default: break; } } if (!status.ok()) { 
cancelInternal(false); } } /** * return true if some restored objs have been dropped. */ private void checkIfNeedCancel() { if (state == RestoreJobState.PENDING) { return; } Database db = globalStateMgr.getDb(dbId); if (db == null) { status = new Status(ErrCode.NOT_FOUND, "database " + dbId + " has been dropped"); return; } Locker locker = new Locker(); locker.lockDatabase(db, LockType.READ); try { for (IdChain idChain : fileMapping.getMapping().keySet()) { OlapTable tbl = (OlapTable) db.getTable(idChain.getTblId()); if (tbl == null) { status = new Status(ErrCode.NOT_FOUND, "table " + idChain.getTblId() + " has been dropped"); return; } PhysicalPartition part = tbl.getPhysicalPartition(idChain.getPartId()); if (part == null) { status = new Status(ErrCode.NOT_FOUND, "partition " + idChain.getPartId() + " has been dropped"); return; } MaterializedIndex index = part.getIndex(idChain.getIdxId()); if (index == null) { status = new Status(ErrCode.NOT_FOUND, "index " + idChain.getIdxId() + " has been dropped"); return; } } } finally { locker.unLockDatabase(db, LockType.READ); } } /** * Restore rules as follow: * A. Table already exist * A1. Partition already exist, generate file mapping * A2. Partition does not exist, add restored partition to the table. * Reset all index/tablet/replica id, and create replica on BE outside the db lock. * B. Table does not exist * B1. Add table to the db, reset all table/index/tablet/replica id, * and create replica on BE outside the db lock. * <p> * All newly created table/partition/index/tablet/replica should be saved for rolling back. * <p> * Step: * 1. download and deserialize backup meta from repository. * 2. set all existing restored table's state to RESTORE. * 3. check if the expected restore objs are valid. * 4. create replicas if necessary. * 5. add restored objs to globalStateMgr. * 6. make snapshot for all replicas for incremental download later. 
*/ protected Status resetTableForRestore(OlapTable remoteOlapTbl, Database db) { return remoteOlapTbl.resetIdsForRestore(globalStateMgr, db, restoreReplicationNum, mvRestoreContext); } protected void sendCreateReplicaTasks() { if (batchTask.getTaskNum() > 0) { MarkedCountDownLatch<Long, Long> latch = new MarkedCountDownLatch<Long, Long>(batchTask.getTaskNum()); for (AgentTask task : batchTask.getAllTasks()) { latch.addMark(task.getBackendId(), task.getTabletId()); ((CreateReplicaTask) task).setLatch(latch); AgentTaskQueue.addTask(task); } AgentTaskExecutor.submit(batchTask); long timeout = Config.tablet_create_timeout_second * 1000L * batchTask.getTaskNum(); timeout = Math.min(10L * 60L * 1000L, timeout); boolean ok = false; try { LOG.info("begin to send create replica tasks to BE for restore. total {} tasks. timeout: {}", batchTask.getTaskNum(), timeout); ok = latch.await(timeout, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { LOG.warn("InterruptedException: ", e); } if (ok) { LOG.debug("finished to create all restored replcias. {}", this); return; } List<Entry<Long, Long>> unfinishedMarks = latch.getLeftMarks(); List<Entry<Long, Long>> subList = unfinishedMarks.subList(0, Math.min(unfinishedMarks.size(), 10)); String idStr = Joiner.on(", ").join(subList); status = new Status(ErrCode.COMMON_ERROR, "Failed to create replicas for restore. 
unfinished marks: " + idStr); } } protected void addRestorePartitionsAndTables(Database db) { Locker locker = new Locker(); locker.lockDatabase(db, LockType.WRITE); try { addRestoredPartitions(db, false); for (Table tbl : restoredTbls) { if (!db.registerTableUnlocked(tbl)) { status = new Status(ErrCode.COMMON_ERROR, "Table " + tbl.getName() + " already exist in db: " + db.getOriginName()); return; } } } finally { locker.unLockDatabase(db, LockType.WRITE); } } protected void prepareAndSendSnapshotTasks(Database db) { unfinishedSignatureToId.clear(); taskProgress.clear(); taskErrMsg.clear(); skipRestoreRemoteTableIds.clear(); Multimap<Long, Long> bePathsMap = HashMultimap.create(); batchTask = new AgentBatchTask(); Locker locker = new Locker(); locker.lockDatabase(db, LockType.READ); try { for (IdChain idChain : fileMapping.getMapping().keySet()) { OlapTable tbl = (OlapTable) db.getTable(idChain.getTblId()); PhysicalPartition part = tbl.getPhysicalPartition(idChain.getPartId()); MaterializedIndex index = part.getIndex(idChain.getIdxId()); LocalTablet tablet = (LocalTablet) index.getTablet(idChain.getTabletId()); Replica replica = tablet.getReplicaById(idChain.getReplicaId()); long signature = globalStateMgr.getNextId(); SnapshotTask task = new SnapshotTask(null, replica.getBackendId(), signature, jobId, db.getId(), tbl.getId(), part.getId(), index.getId(), tablet.getId(), part.getVisibleVersion(), tbl.getSchemaHashByIndexId(index.getId()), timeoutMs, true /* is restore task*/); batchTask.addTask(task); unfinishedSignatureToId.put(signature, tablet.getId()); bePathsMap.put(replica.getBackendId(), replica.getPathHash()); } } finally { locker.unLockDatabase(db, LockType.READ); } com.starrocks.common.Status st = GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().checkExceedDiskCapacityLimit(bePathsMap, true); if (!st.ok()) { status = new Status(ErrCode.COMMON_ERROR, st.getErrorMsg()); return; } for (AgentTask task : batchTask.getAllTasks()) { 
AgentTaskQueue.addTask(task); }
        AgentTaskExecutor.submit(batchTask);
    }

    /* Validates that the local partition's replication num matches the requested restore
       replication num; on mismatch sets an error status and returns true. Otherwise records
       the file mapping (overwritten at commit) and the version to restore, returning false. */
    private boolean genFileMappingWhenBackupReplicasEqual(PartitionInfo localPartInfo, Partition localPartition,
                                                          Table localTbl, BackupPartitionInfo backupPartInfo,
                                                          BackupTableInfo tblInfo) {
        if (localPartInfo.getReplicationNum(localPartition.getId()) != restoreReplicationNum) {
            status = new Status(ErrCode.COMMON_ERROR, "Partition " + backupPartInfo.name + " in table "
                    + localTbl.getName() + " has different replication num '"
                    + localPartInfo.getReplicationNum(localPartition.getId())
                    + "' with partition in repository, which is " + restoreReplicationNum);
            return true;
        }

        OlapTable localOlapTbl = (OlapTable) localTbl;
        genFileMapping(localOlapTbl, localPartition, tblInfo.id, backupPartInfo, true /* overwrite when commit */);
        restoredVersionInfo.put(localOlapTbl.getId(), localPartition.getId(), backupPartInfo.version);
        return false;
    }

    /* Registers every tablet/replica of the restored partition in the tablet inverted index
       and builds the CreateReplicaTask batch. All restored replicas are created on HDD. */
    protected void createReplicas(OlapTable localTbl, Partition restorePart) {
        Set<ColumnId> bfColumns = localTbl.getBfColumnIds();
        double bfFpp = localTbl.getBfFpp();
        for (PhysicalPartition physicalPartition : restorePart.getSubPartitions()) {
            for (MaterializedIndex restoredIdx : physicalPartition.getMaterializedIndices(IndexExtState.VISIBLE)) {
                MaterializedIndexMeta indexMeta = localTbl.getIndexMetaByIndexId(restoredIdx.getId());
                TabletMeta tabletMeta = new TabletMeta(dbId, localTbl.getId(), physicalPartition.getId(),
                        restoredIdx.getId(), indexMeta.getSchemaHash(), TStorageMedium.HDD);
                /* rebuild the tablet schema from the local index meta for the create-replica RPC */
                TTabletSchema tabletSchema = SchemaInfo.newBuilder()
                        .setId(indexMeta.getSchemaId())
                        .setKeysType(indexMeta.getKeysType())
                        .setShortKeyColumnCount(indexMeta.getShortKeyColumnCount())
                        .setSchemaHash(indexMeta.getSchemaHash())
                        .setSortKeyIndexes(indexMeta.getSortKeyIdxes())
                        .setSortKeyUniqueIds(indexMeta.getSortKeyUniqueIds())
                        .setStorageType(indexMeta.getStorageType())
                        .addColumns(indexMeta.getSchema())
                        .setBloomFilterColumnNames(bfColumns)
                        .setBloomFilterFpp(bfFpp) 
.setIndexes(localTbl.getCopiedIndexes()) .build().toTabletSchema(); for (Tablet restoreTablet : restoredIdx.getTablets()) { GlobalStateMgr.getCurrentState().getTabletInvertedIndex().addTablet(restoreTablet.getId(), tabletMeta); for (Replica restoreReplica : ((LocalTablet) restoreTablet).getImmutableReplicas()) { GlobalStateMgr.getCurrentState().getTabletInvertedIndex() .addReplica(restoreTablet.getId(), restoreReplica); LOG.info("tablet {} physical partition {} index {} replica {}", restoreTablet.getId(), physicalPartition.getId(), restoredIdx.getId(), restoreReplica.getId()); CreateReplicaTask task = CreateReplicaTask.newBuilder() .setNodeId(restoreReplica.getBackendId()) .setDbId(dbId) .setTableId(localTbl.getId()) .setPartitionId(physicalPartition.getId()) .setIndexId(restoredIdx.getId()) .setTabletId(restoreTablet.getId()) .setVersion(restoreReplica.getVersion()) .setStorageMedium(TStorageMedium.HDD) /* all restored replicas will be saved to HDD */ .setEnablePersistentIndex(localTbl.enablePersistentIndex()) .setPrimaryIndexCacheExpireSec(localTbl.primaryIndexCacheExpireSec()) .setTabletType(localTbl.getPartitionInfo().getTabletType(restorePart.getId())) .setCompressionType(localTbl.getCompressionType()) .setCompressionLevel(localTbl.getCompressionLevel()) .setInRestoreMode(true) .setTabletSchema(tabletSchema) .build(); batchTask.addTask(task); } } } } } public Partition resetPartitionForRestore(OlapTable localTbl, OlapTable remoteTbl, String partName, int restoreReplicationNum) { Preconditions.checkState(localTbl.getPartition(partName) == null); Partition remotePart = remoteTbl.getPartition(partName); Preconditions.checkNotNull(remotePart); PartitionInfo localPartitionInfo = localTbl.getPartitionInfo(); Preconditions.checkState(localPartitionInfo.isRangePartition()); long newPartId = globalStateMgr.getNextId(); remotePart.setIdForRestore(newPartId); Map<String, Long> localIdxNameToId = localTbl.getIndexNameToId(); for (String localIdxName : 
localIdxNameToId.keySet()) { long remoteIdxId = remoteTbl.getIndexIdByName(localIdxName); MaterializedIndex remoteIdx = remotePart.getIndex(remoteIdxId); long localIdxId = localIdxNameToId.get(localIdxName); remoteIdx.setIdForRestore(localIdxId); if (localIdxId != localTbl.getBaseIndexId()) { remotePart.deleteRollupIndex(remoteIdxId); remotePart.createRollupIndex(remoteIdx); } } for (PhysicalPartition physicalPartition : remotePart.getSubPartitions()) { if (physicalPartition.getId() != newPartId) { remotePart.removeSubPartition(physicalPartition.getId()); long newPhysicalPartId = globalStateMgr.getNextId(); physicalPartition.setIdForRestore(newPhysicalPartId); physicalPartition.setParentId(newPartId); remotePart.addSubPartition(physicalPartition); } long visibleVersion = physicalPartition.getVisibleVersion(); for (MaterializedIndex remoteIdx : physicalPartition.getMaterializedIndices(IndexExtState.VISIBLE)) { int schemaHash = remoteTbl.getSchemaHashByIndexId(remoteIdx.getId()); int remotetabletSize = remoteIdx.getTablets().size(); remoteIdx.clearTabletsForRestore(); status = remoteTbl.createTabletsForRestore(remotetabletSize, remoteIdx, globalStateMgr, restoreReplicationNum, visibleVersion, schemaHash, physicalPartition.getId(), physicalPartition.getShardGroupId(), null); if (!status.ok()) { return null; } } } return remotePart; } protected void genFileMapping(OlapTable localTbl, Partition localPartition, Long remoteTblId, BackupPartitionInfo backupPartInfo, boolean overwrite) { if (localPartition.getSubPartitions().size() > 1) { genFileMappingWithSubPartition(localTbl, localPartition, remoteTblId, backupPartInfo, overwrite); } else { genFileMappingWithPartition(localTbl, localPartition, remoteTblId, backupPartInfo, overwrite); } } protected void genFileMappingWithPartition(OlapTable localTbl, Partition localPartition, Long remoteTblId, BackupPartitionInfo backupPartInfo, boolean overwrite) { for (MaterializedIndex localIdx : 
localPartition.getMaterializedIndices(IndexExtState.VISIBLE)) { BackupIndexInfo backupIdxInfo = backupPartInfo.getIdx(localTbl.getIndexNameById(localIdx.getId())); Preconditions.checkState(backupIdxInfo.tablets.size() == localIdx.getTablets().size()); for (int i = 0; i < localIdx.getTablets().size(); i++) { LocalTablet localTablet = (LocalTablet) localIdx.getTablets().get(i); BackupTabletInfo backupTabletInfo = backupIdxInfo.tablets.get(i); for (Replica localReplica : localTablet.getImmutableReplicas()) { IdChain src = new IdChain(remoteTblId, backupPartInfo.id, backupIdxInfo.id, backupTabletInfo.id, -1L /* no replica id */); IdChain dest = new IdChain(localTbl.getId(), localPartition.getId(), localIdx.getId(), localTablet.getId(), localReplica.getId()); fileMapping.putMapping(dest, src, overwrite); LOG.debug("tablet mapping: {} to {} file mapping: {} to {}", backupTabletInfo.id, localTablet.getId(), src, dest); } } } } protected void genFileMappingWithSubPartition(OlapTable localTbl, Partition localPartition, Long remoteTblId, BackupPartitionInfo backupPartInfo, boolean overwrite) { for (PhysicalPartition physicalPartition : localPartition.getSubPartitions()) { BackupPhysicalPartitionInfo physicalPartitionInfo = backupPartInfo.subPartitions.get( physicalPartition.getBeforeRestoreId()); for (MaterializedIndex localIdx : physicalPartition.getMaterializedIndices(IndexExtState.VISIBLE)) { BackupIndexInfo backupIdxInfo = physicalPartitionInfo.getIdx(localTbl.getIndexNameById(localIdx.getId())); Preconditions.checkState(backupIdxInfo.tablets.size() == localIdx.getTablets().size()); for (int i = 0; i < localIdx.getTablets().size(); i++) { LocalTablet localTablet = (LocalTablet) localIdx.getTablets().get(i); BackupTabletInfo backupTabletInfo = backupIdxInfo.tablets.get(i); for (Replica localReplica : localTablet.getImmutableReplicas()) { IdChain src = new IdChain(remoteTblId, physicalPartitionInfo.id, backupIdxInfo.id, backupTabletInfo.id, -1L /* no replica id */); 
IdChain dest = new IdChain(localTbl.getId(), physicalPartition.getId(), localIdx.getId(), localTablet.getId(), localReplica.getId()); fileMapping.putMapping(dest, src, overwrite); LOG.debug("tablet mapping: {} to {} file mapping: {} to {}", backupTabletInfo.id, localTablet.getId(), src, dest); } } } } } private void replayCheckAndPrepareMeta() { Database db = globalStateMgr.getDb(dbId); Locker locker = new Locker(); locker.lockDatabase(db, LockType.WRITE); try { for (BackupTableInfo tblInfo : jobInfo.tables.values()) { Table tbl = db.getTable(jobInfo.getAliasByOriginNameIfSet(tblInfo.name)); if (tbl == null) { continue; } OlapTable olapTbl = (OlapTable) tbl; olapTbl.setState(OlapTableState.RESTORE); } addRestoredPartitions(db, true); for (Table restoreTbl : restoredTbls) { db.registerTableUnlocked(restoreTbl); for (Partition restorePart : restoreTbl.getPartitions()) { modifyInvertedIndex((OlapTable) restoreTbl, restorePart); } } } finally { locker.unLockDatabase(db, LockType.WRITE); } LOG.info("replay check and prepare meta. 
{}", this); } protected void addRestoredPartitions(Database db, boolean modify) { for (Pair<String, Partition> entry : restoredPartitions) { OlapTable localTbl = (OlapTable) db.getTable(entry.first); Partition restorePart = entry.second; OlapTable remoteTbl = (OlapTable) backupMeta.getTable(entry.first); RangePartitionInfo localPartitionInfo = (RangePartitionInfo) localTbl.getPartitionInfo(); RangePartitionInfo remotePartitionInfo = (RangePartitionInfo) remoteTbl.getPartitionInfo(); BackupPartitionInfo backupPartitionInfo = jobInfo.getTableInfo(entry.first).getPartInfo(restorePart.getName()); long remotePartId = backupPartitionInfo.id; Range<PartitionKey> remoteRange = remotePartitionInfo.getRange(remotePartId); DataProperty remoteDataProperty = remotePartitionInfo.getDataProperty(remotePartId); localPartitionInfo.addPartition(restorePart.getId(), false, remoteRange, remoteDataProperty, (short) restoreReplicationNum, remotePartitionInfo.getIsInMemory(remotePartId)); localTbl.addPartition(restorePart); if (modify) { modifyInvertedIndex(localTbl, restorePart); } } } protected void modifyInvertedIndex(OlapTable restoreTbl, Partition restorePart) { for (MaterializedIndex restoreIdx : restorePart.getMaterializedIndices(IndexExtState.VISIBLE)) { int schemaHash = restoreTbl.getSchemaHashByIndexId(restoreIdx.getId()); TabletMeta tabletMeta = new TabletMeta(dbId, restoreTbl.getId(), restorePart.getId(), restoreIdx.getId(), schemaHash, TStorageMedium.HDD); for (Tablet restoreTablet : restoreIdx.getTablets()) { GlobalStateMgr.getCurrentState().getTabletInvertedIndex().addTablet(restoreTablet.getId(), tabletMeta); for (Replica restoreReplica : ((LocalTablet) restoreTablet).getImmutableReplicas()) { GlobalStateMgr.getCurrentState().getTabletInvertedIndex() .addReplica(restoreTablet.getId(), restoreReplica); } } } } private void waitingAllSnapshotsFinished() { if (unfinishedSignatureToId.isEmpty()) { snapshotFinishedTime = System.currentTimeMillis(); state = 
RestoreJobState.DOWNLOAD; globalStateMgr.getEditLog().logRestoreJob(this); for (ColocatePersistInfo colocatePersistInfo : colocatePersistInfos) { GlobalStateMgr.getCurrentState().getEditLog().logColocateAddTable(colocatePersistInfo); } LOG.info("finished making snapshots. {}", this); return; } LOG.info("waiting {} replicas to make snapshot: [{}]. {}", unfinishedSignatureToId.size(), unfinishedSignatureToId, this); return; } private void downloadSnapshots() { ArrayListMultimap<Long, SnapshotInfo> dbToSnapshotInfos = ArrayListMultimap.create(); for (SnapshotInfo info : snapshotInfos.values()) { dbToSnapshotInfos.put(info.getDbId(), info); } unfinishedSignatureToId.clear(); taskProgress.clear(); taskErrMsg.clear(); skipRestoreRemoteTableIds.clear(); batchTask = new AgentBatchTask(); for (long dbId : dbToSnapshotInfos.keySet()) { List<SnapshotInfo> infos = dbToSnapshotInfos.get(dbId); Database db = globalStateMgr.getDb(dbId); if (db == null) { status = new Status(ErrCode.NOT_FOUND, "db " + dbId + " does not exist"); return; } ArrayListMultimap<Long, SnapshotInfo> beToSnapshots = ArrayListMultimap.create(); for (SnapshotInfo info : infos) { beToSnapshots.put(info.getBeId(), info); } Locker locker = new Locker(); locker.lockDatabase(db, LockType.READ); try { for (Long beId : beToSnapshots.keySet()) { List<SnapshotInfo> beSnapshotInfos = beToSnapshots.get(beId); List<FsBroker> brokerAddrs = Lists.newArrayList(); THdfsProperties hdfsProperties = new THdfsProperties(); if (repo.getStorage().hasBroker()) { Status st = repo.getBrokerAddress(beId, globalStateMgr, brokerAddrs); if (!st.ok()) { status = st; return; } Preconditions.checkState(brokerAddrs.size() == 1); } else { BrokerDesc brokerDesc = new BrokerDesc(repo.getStorage().getProperties()); try { HdfsUtil.getTProperties(repo.getLocation(), brokerDesc, hdfsProperties); } catch (UserException e) { status = new Status(ErrCode.COMMON_ERROR, "Get properties from " + repo.getLocation() + " error."); return; } } 
prepareDownloadTasks(beSnapshotInfos, db, beId, brokerAddrs, hdfsProperties); if (!status.ok()) { return; } } } finally { locker.unLockDatabase(db, LockType.READ); } } sendDownloadTasks(); state = RestoreJobState.DOWNLOADING; LOG.info("finished to send download tasks to BE. num: {}. {}", batchTask.getTaskNum(), this); return; } protected void waitingAllDownloadFinished() { if (unfinishedSignatureToId.isEmpty()) { downloadFinishedTime = System.currentTimeMillis(); state = RestoreJobState.COMMIT; backupMeta = null; globalStateMgr.getEditLog().logRestoreJob(this); LOG.info("finished to download. {}", this); } LOG.info("waiting {} tasks to finish downloading from repo. {}", unfinishedSignatureToId.size(), this); } protected void prepareDownloadTasks(List<SnapshotInfo> beSnapshotInfos, Database db, long beId, List<FsBroker> brokerAddrs, THdfsProperties hdfsProperties) { int totalNum = beSnapshotInfos.size(); int batchNum = totalNum; if (Config.max_download_task_per_be > 0) { batchNum = Math.min(totalNum, Config.max_download_task_per_be); } int taskNumPerBatch = Math.max(totalNum / batchNum, 1); LOG.debug("backend {} has {} batch, total {} tasks, {}", beId, batchNum, totalNum, this); int index = 0; for (int batch = 0; batch < batchNum; batch++) { Map<String, String> srcToDest = Maps.newHashMap(); int currentBatchTaskNum = (batch == batchNum - 1) ? 
totalNum - index : taskNumPerBatch; for (int j = 0; j < currentBatchTaskNum; j++) { SnapshotInfo info = beSnapshotInfos.get(index++); Table tbl = db.getTable(info.getTblId()); if (tbl == null) { status = new Status(ErrCode.NOT_FOUND, "restored table " + info.getTabletId() + " does not exist"); return; } OlapTable olapTbl = (OlapTable) tbl; PhysicalPartition part = olapTbl.getPhysicalPartition(info.getPartitionId()); if (part == null) { status = new Status(ErrCode.NOT_FOUND, "partition " + info.getPartitionId() + " does not exist in restored table: " + tbl.getName()); return; } MaterializedIndex idx = part.getIndex(info.getIndexId()); if (idx == null) { status = new Status(ErrCode.NOT_FOUND, "index " + info.getIndexId() + " does not exist in partion " + part.getId() + "of restored table " + tbl.getName()); return; } LocalTablet tablet = (LocalTablet) idx.getTablet(info.getTabletId()); if (tablet == null) { status = new Status(ErrCode.NOT_FOUND, "tablet " + info.getTabletId() + " does not exist in restored table " + tbl.getName()); return; } Replica replica = tablet.getReplicaByBackendId(info.getBeId()); if (replica == null) { status = new Status(ErrCode.NOT_FOUND, "replica in be " + info.getBeId() + " of tablet " + tablet.getId() + " does not exist in restored table " + tbl.getName()); return; } IdChain catalogIds = new IdChain(tbl.getId(), part.getId(), idx.getId(), info.getTabletId(), replica.getId()); IdChain repoIds = fileMapping.get(catalogIds); if (repoIds == null) { status = new Status(ErrCode.NOT_FOUND, "failed to get id mapping of globalStateMgr ids: " + catalogIds.toString()); LOG.info("current file mapping: {}", fileMapping); return; } String repoTabletPath = jobInfo.getFilePath(repoIds); String src = repo.getRepoPath(label, repoTabletPath); SnapshotInfo snapshotInfo = snapshotInfos.get(info.getTabletId(), info.getBeId()); Preconditions.checkNotNull(snapshotInfo, info.getTabletId() + "-" + info.getBeId()); String dest = snapshotInfo.getTabletPath(); 
srcToDest.put(src, dest); LOG.debug("catalog id: {}, repo id: {}, repoTabletPath: {}, src: {}, dest: {}", catalogIds, repoIds, repoTabletPath, src, dest); } long signature = globalStateMgr.getNextId(); DownloadTask task; if (repo.getStorage().hasBroker()) { task = new DownloadTask(null, beId, signature, jobId, dbId, srcToDest, brokerAddrs.get(0), repo.getStorage().getProperties()); } else { task = new DownloadTask(null, beId, signature, jobId, dbId, srcToDest, null, repo.getStorage().getProperties(), hdfsProperties); } batchTask.addTask(task); unfinishedSignatureToId.put(signature, beId); } } protected void sendDownloadTasks() { for (AgentTask task : batchTask.getAllTasks()) { AgentTaskQueue.addTask(task); } AgentTaskExecutor.submit(batchTask); } private void commit() { unfinishedSignatureToId.clear(); taskProgress.clear(); taskErrMsg.clear(); skipRestoreRemoteTableIds.clear(); prepareAndSendDirMoveTasks(); state = RestoreJobState.COMMITTING; } protected void prepareAndSendDirMoveTasks() { batchTask = new AgentBatchTask(); for (Cell<Long, Long, SnapshotInfo> cell : snapshotInfos.cellSet()) { SnapshotInfo info = cell.getValue(); long signature = globalStateMgr.getNextId(); DirMoveTask task = new DirMoveTask(null, cell.getColumnKey(), signature, jobId, dbId, info.getTblId(), info.getPartitionId(), info.getTabletId(), cell.getRowKey(), info.getTabletPath(), info.getSchemaHash(), true /* need reload tablet header */); batchTask.addTask(task); unfinishedSignatureToId.put(signature, info.getTabletId()); } for (AgentTask task : batchTask.getAllTasks()) { AgentTaskQueue.addTask(task); } AgentTaskExecutor.submit(batchTask); LOG.info("finished to send move dir tasks. num: {}. {}", batchTask.getTaskNum(), this); } protected void waitingAllTabletsCommitted() { if (unfinishedSignatureToId.isEmpty()) { LOG.info("finished to commit all tablet. 
{}", this); Status st = allTabletCommitted(false /* not replay */); if (!st.ok()) { status = st; } MetricRepo.COUNTER_UNFINISHED_RESTORE_JOB.increase(-1L); return; } LOG.info("waiting {} tablets to commit. {}", unfinishedSignatureToId.size(), this); } private Status allTabletCommitted(boolean isReplay) { Database db = globalStateMgr.getDb(dbId); if (db == null) { return new Status(ErrCode.NOT_FOUND, "database " + dbId + " does not exist"); } Locker locker = new Locker(); locker.lockDatabase(db, LockType.WRITE); try { setTableStateToNormal(db); for (long tblId : restoredVersionInfo.rowKeySet()) { Table tbl = db.getTable(tblId); if (tbl == null) { continue; } OlapTable olapTbl = (OlapTable) tbl; Map<Long, Long> map = restoredVersionInfo.rowMap().get(tblId); for (Map.Entry<Long, Long> entry : map.entrySet()) { long partId = entry.getKey(); PhysicalPartition part = olapTbl.getPhysicalPartition(partId); if (part == null) { continue; } part.updateVersionForRestore(entry.getValue()); for (MaterializedIndex idx : part.getMaterializedIndices(IndexExtState.VISIBLE)) { updateTablets(idx, part); } LOG.debug("restore set partition {} version in table {}, version: {}", partId, tblId, entry.getValue()); } } } finally { locker.unLockDatabase(db, LockType.WRITE); } if (!isReplay) { restoredPartitions.clear(); restoredTbls.clear(); releaseSnapshots(); snapshotInfos.clear(); finishedTime = System.currentTimeMillis(); state = RestoreJobState.FINISHED; globalStateMgr.getEditLog().logRestoreJob(this); locker.lockDatabase(db, LockType.READ); try { for (BackupTableInfo tblInfo : jobInfo.tables.values()) { Table tbl = db.getTable(jobInfo.getAliasByOriginNameIfSet(tblInfo.name)); if (skipRestoreRemoteTableIds.contains(tblInfo.id)) { continue; } if (tbl == null) { LOG.warn("skip post actions after restore success, table name does not existed: %s", tblInfo.name); continue; } if (!tbl.isNativeTableOrMaterializedView()) { continue; } LOG.info("do post actions for table : {}", tbl.getName()); if 
(tbl.isOlapTable()) { try { DynamicPartitionUtil.registerOrRemovePartitionScheduleInfo(db.getId(), (OlapTable) tbl); } catch (Exception e) { LOG.warn(String.format("register table %s for dynamic partition scheduler failed: ", tbl.getName()), e); } } if (tbl instanceof OlapTable) { OlapTable olapTable = (OlapTable) tbl; try { olapTable.doAfterRestore(mvRestoreContext); } catch (Exception e) { LOG.warn(String.format("rebuild olap table %s failed: ", olapTable.getName()), e); } } } } catch (Exception e) { LOG.warn("Do post actions after restore success failed: ", e); throw e; } finally { locker.unLockDatabase(db, LockType.READ); } } LOG.info("job is finished. is replay: {}. {}", isReplay, this); return Status.OK; } protected void updateTablets(MaterializedIndex idx, PhysicalPartition part) { for (Tablet tablet : idx.getTablets()) { for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) { replica.updateForRestore(part.getVisibleVersion(), replica.getDataSize(), replica.getRowCount()); replica.setLastReportVersion(part.getVisibleVersion()); } } } protected void releaseSnapshots() { if (snapshotInfos.isEmpty()) { return; } batchTask = new AgentBatchTask(); for (SnapshotInfo info : snapshotInfos.values()) { ReleaseSnapshotTask releaseTask = new ReleaseSnapshotTask(null, info.getBeId(), info.getDbId(), info.getTabletId(), info.getPath()); batchTask.addTask(releaseTask); } AgentTaskExecutor.submit(batchTask); LOG.info("send {} release snapshot tasks, job: {}", snapshotInfos.size(), this); } private void replayWaitingAllTabletsCommitted() { allTabletCommitted(true /* is replay */); } public List<String> getInfo() { List<String> info = Lists.newArrayList(); info.add(String.valueOf(jobId)); info.add(label); info.add(backupTimestamp); info.add(dbName); info.add(state.name()); info.add(String.valueOf(allowLoad)); info.add(String.valueOf(restoreReplicationNum)); info.add(getRestoreObjs()); info.add(TimeUtils.longToTimeString(createTime)); 
info.add(TimeUtils.longToTimeString(metaPreparedTime)); info.add(TimeUtils.longToTimeString(snapshotFinishedTime)); info.add(TimeUtils.longToTimeString(downloadFinishedTime)); info.add(TimeUtils.longToTimeString(finishedTime)); try { info.add(Joiner.on(", ").join(unfinishedSignatureToId.entrySet())); info.add(Joiner.on(", ").join(taskProgress.entrySet().stream().map( e -> "[" + e.getKey() + ": " + e.getValue().first + "/" + e.getValue().second + "]").collect( Collectors.toList()))); info.add(Joiner.on(", ").join(taskErrMsg.entrySet().stream().map(n -> "[" + n.getKey() + ": " + n.getValue() + "]").collect(Collectors.toList()))); } catch (Exception e) { throw new SemanticException("meta data may has been updated during this period, please try again"); } info.add(status.toString()); info.add(String.valueOf(timeoutMs / 1000)); return info; } private String getRestoreObjs() { Preconditions.checkState(jobInfo != null); return jobInfo.getInfo(); } @Override public boolean isDone() { if (state == RestoreJobState.FINISHED || state == RestoreJobState.CANCELLED) { return true; } return false; } @Override public synchronized Status cancel() { if (isDone()) { return new Status(ErrCode.COMMON_ERROR, "Job with label " + label + " can not be cancelled. 
state: " + state); } status = new Status(ErrCode.COMMON_ERROR, "user cancelled, current state: " + state.name()); cancelInternal(false); MetricRepo.COUNTER_UNFINISHED_RESTORE_JOB.increase(-1L); return Status.OK; } public void cancelInternal(boolean isReplay) { if (!isReplay) { switch (state) { case SNAPSHOTING: for (Long taskId : unfinishedSignatureToId.keySet()) { AgentTaskQueue.removeTaskOfType(TTaskType.MAKE_SNAPSHOT, taskId); } break; case DOWNLOADING: for (Long taskId : unfinishedSignatureToId.keySet()) { AgentTaskQueue.removeTaskOfType(TTaskType.DOWNLOAD, taskId); } break; case COMMITTING: for (Long taskId : unfinishedSignatureToId.keySet()) { AgentTaskQueue.removeTaskOfType(TTaskType.MOVE, taskId); } break; default: break; } } Database db = globalStateMgr.getDb(dbId); if (db != null) { Locker locker = new Locker(); locker.lockDatabase(db, LockType.WRITE); try { setTableStateToNormal(db); for (Table restoreTbl : restoredTbls) { LOG.info("remove restored table when cancelled: {}", restoreTbl.getName()); for (Partition part : restoreTbl.getPartitions()) { for (MaterializedIndex idx : part.getMaterializedIndices(IndexExtState.VISIBLE)) { for (Tablet tablet : idx.getTablets()) { GlobalStateMgr.getCurrentState().getTabletInvertedIndex().deleteTablet(tablet.getId()); } } } db.dropTable(restoreTbl.getName()); } for (Pair<String, Partition> entry : restoredPartitions) { OlapTable restoreTbl = (OlapTable) db.getTable(entry.first); if (restoreTbl == null) { continue; } LOG.info("remove restored partition in table {} when cancelled: {}", restoreTbl.getName(), entry.second.getName()); restoreTbl.dropPartition(dbId, entry.second.getName(), true /* is restore */); } } finally { locker.unLockDatabase(db, LockType.WRITE); } } for (ColocatePersistInfo colocatePersistInfo : colocatePersistInfos) { for (Table restoreTbl : restoredTbls) { if (restoreTbl instanceof OlapTable && restoreTbl.getId() == colocatePersistInfo.getTableId()) { 
GlobalStateMgr.getCurrentState().getColocateTableIndex() .removeTable(restoreTbl.getId(), (OlapTable) restoreTbl, isReplay); } } } if (!isReplay) { backupMeta = null; releaseSnapshots(); snapshotInfos.clear(); RestoreJobState curState = state; finishedTime = System.currentTimeMillis(); state = RestoreJobState.CANCELLED; globalStateMgr.getEditLog().logRestoreJob(this); LOG.info("finished to cancel restore job. current state: {}. is replay: {}. {}", curState.name(), isReplay, this); return; } LOG.info("finished to cancel restore job. is replay: {}. {}", isReplay, this); } private void setTableStateToNormal(Database db) { for (BackupTableInfo tblInfo : jobInfo.tables.values()) { if (skipRestoreRemoteTableIds.contains(tblInfo.id)) { continue; } Table tbl = db.getTable(jobInfo.getAliasByOriginNameIfSet(tblInfo.name)); if (tbl == null) { continue; } if (!tbl.isNativeTableOrMaterializedView()) { continue; } OlapTable olapTbl = (OlapTable) tbl; if (olapTbl.getState() == OlapTableState.RESTORE || olapTbl.getState() == OlapTableState.RESTORE_WITH_LOAD) { olapTbl.setState(OlapTableState.NORMAL); } } } public static RestoreJob read(DataInput in) throws IOException { RestoreJob job = new RestoreJob(); job.readFields(in); return job; } @Override public void write(DataOutput out) throws IOException { super.write(out); Text.writeString(out, backupTimestamp); jobInfo.write(out); out.writeBoolean(allowLoad); Text.writeString(out, state.name()); if (backupMeta != null) { out.writeBoolean(true); backupMeta.write(out); } else { out.writeBoolean(false); } fileMapping.write(out); out.writeLong(metaPreparedTime); out.writeLong(snapshotFinishedTime); out.writeLong(downloadFinishedTime); out.writeInt(restoreReplicationNum); out.writeInt(restoredPartitions.size()); for (Pair<String, Partition> entry : restoredPartitions) { Text.writeString(out, entry.first); entry.second.write(out); } out.writeInt(restoredTbls.size()); for (Table tbl : restoredTbls) { tbl.write(out); } 
out.writeInt(restoredVersionInfo.rowKeySet().size());
        for (long tblId : restoredVersionInfo.rowKeySet()) {
            out.writeLong(tblId);
            out.writeInt(restoredVersionInfo.row(tblId).size());
            for (Map.Entry<Long, Long> entry : restoredVersionInfo.row(tblId).entrySet()) {
                out.writeLong(entry.getKey());
                out.writeLong(entry.getValue());
                /* NOTE(review): constant 0 written here looks like a placeholder kept for
                   on-disk format compatibility (a removed field) — confirm before changing */
                out.writeLong(0);
            }
        }

        /* snapshotInfos layout: tabletId -> (backendId -> SnapshotInfo) */
        out.writeInt(snapshotInfos.rowKeySet().size());
        for (long tabletId : snapshotInfos.rowKeySet()) {
            out.writeLong(tabletId);
            Map<Long, SnapshotInfo> map = snapshotInfos.row(tabletId);
            out.writeInt(map.size());
            for (Map.Entry<Long, SnapshotInfo> entry : map.entrySet()) {
                out.writeLong(entry.getKey());
                entry.getValue().write(out);
            }
        }
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder(super.toString());
        sb.append(", backup ts: ").append(backupTimestamp);
        sb.append(", state: ").append(state.name());
        return sb.toString();
    }
}
class RestoreJob extends AbstractJob { private static final Logger LOG = LogManager.getLogger(RestoreJob.class); public enum RestoreJobState { PENDING, SNAPSHOTING, DOWNLOAD, DOWNLOADING, COMMIT, COMMITTING, FINISHED, CANCELLED } @SerializedName(value = "backupTimestamp") private String backupTimestamp; @SerializedName(value = "jobInfo") protected BackupJobInfo jobInfo; @SerializedName(value = "allowLoad") private boolean allowLoad; @SerializedName(value = "state") private RestoreJobState state; @SerializedName(value = "backupMeta") protected BackupMeta backupMeta; @SerializedName(value = "fileMapping") protected RestoreFileMapping fileMapping = new RestoreFileMapping(); @SerializedName(value = "metaPreparedTime") private long metaPreparedTime = -1; @SerializedName(value = "snapshotFinishedTime") private long snapshotFinishedTime = -1; @SerializedName(value = "downloadFinishedTime") private long downloadFinishedTime = -1; @SerializedName(value = "restoreReplicationNum") protected int restoreReplicationNum; @SerializedName(value = "restoredPartitions") protected List<Pair<String, Partition>> restoredPartitions = Lists.newArrayList(); @SerializedName(value = "restoredTbls") private List<Table> restoredTbls = Lists.newArrayList(); @SerializedName(value = "restoredVersionInfo") private com.google.common.collect.Table<Long, Long, Long> restoredVersionInfo = HashBasedTable.create(); @SerializedName(value = "snapshotInfos") protected com.google.common.collect.Table<Long, Long, SnapshotInfo> snapshotInfos = HashBasedTable.create(); @SerializedName(value = "colocatePersistInfos") private List<ColocatePersistInfo> colocatePersistInfos = Lists.newArrayList(); protected Map<Long, Long> unfinishedSignatureToId = Maps.newConcurrentMap(); protected Set<Long> skipRestoreRemoteTableIds = Sets.newHashSet(); private MvRestoreContext mvRestoreContext; private AgentBatchTask batchTask; boolean enableColocateRestore = Config.enable_colocate_restore; public RestoreJob() { 
super(JobType.RESTORE);
    }

    public RestoreJob(String label, String backupTs, long dbId, String dbName, BackupJobInfo jobInfo,
                      boolean allowLoad, int restoreReplicationNum, long timeoutMs,
                      GlobalStateMgr globalStateMgr, long repoId, BackupMeta backupMeta,
                      MvRestoreContext mvRestoreContext) {
        super(JobType.RESTORE, label, dbId, dbName, timeoutMs, globalStateMgr, repoId);
        this.backupTimestamp = backupTs;
        this.jobInfo = jobInfo;
        this.allowLoad = allowLoad;
        this.restoreReplicationNum = restoreReplicationNum;
        this.state = RestoreJobState.PENDING;
        this.backupMeta = backupMeta;
        this.mvRestoreContext = mvRestoreContext;
    }

    public RestoreJobState getState() {
        return state;
    }

    public RestoreFileMapping getFileMapping() {
        return fileMapping;
    }

    public BackupJobInfo getJobInfo() {
        return jobInfo;
    }

    public BackupMeta getBackupMeta() {
        return backupMeta;
    }

    /**
     * Callback for a finished BE snapshot task.
     * Records the reported snapshot path and removes the task from the unfinished set.
     *
     * @return true if the task belonged to this job and was still outstanding
     */
    public synchronized boolean finishTabletSnapshotTask(SnapshotTask task, TFinishTaskRequest request) {
        if (checkTaskStatus(task, task.getJobId(), request)) {
            return false;
        }

        Preconditions.checkState(request.isSetSnapshot_path());

        SnapshotInfo info = new SnapshotInfo(task.getDbId(), task.getTableId(), task.getPartitionId(),
                task.getIndexId(), task.getTabletId(), task.getBackendId(), task.getSchemaHash(),
                request.getSnapshot_path(), Lists.newArrayList());

        snapshotInfos.put(task.getTabletId(), task.getBackendId(), info);
        taskProgress.remove(task.getSignature());
        Long removedTabletId = unfinishedSignatureToId.remove(task.getSignature());
        if (removedTabletId != null) {
            taskErrMsg.remove(task.getSignature());
            Preconditions.checkState(task.getTabletId() == removedTabletId, removedTabletId);
            LOG.debug("get finished snapshot info: {}, unfinished tasks num: {}, remove result: {}. {}",
                    info, unfinishedSignatureToId.size(), this, removedTabletId);
            return true;
        }
        return false;
    }

    /**
     * Callback for a finished BE download task.
     * Verifies that every downloaded tablet has a matching snapshot record before
     * marking the task complete.
     */
    public synchronized boolean finishTabletDownloadTask(DownloadTask task, TFinishTaskRequest request) {
        if (checkTaskStatus(task, task.getJobId(), request)) {
            return false;
        }

        Preconditions.checkState(request.isSetDownloaded_tablet_ids());

        for (Long tabletId : request.getDownloaded_tablet_ids()) {
            SnapshotInfo info = snapshotInfos.get(tabletId, task.getBackendId());
            if (info == null) {
                LOG.error("failed to find snapshot infos of tablet {} in be {}, {}",
                        tabletId, task.getBackendId(), this);
                return false;
            }
        }

        taskProgress.remove(task.getSignature());

        Long beId = unfinishedSignatureToId.remove(task.getSignature());
        if (beId == null || beId != task.getBackendId()) {
            LOG.error("invalid download task: {}. {}", task, this);
            return false;
        }

        taskErrMsg.remove(task.getSignature());
        return true;
    }

    /** Callback for a finished BE dir-move (commit) task. */
    public synchronized boolean finishDirMoveTask(DirMoveTask task, TFinishTaskRequest request) {
        if (checkTaskStatus(task, task.getJobId(), request)) {
            return false;
        }

        taskProgress.remove(task.getSignature());

        Long tabletId = unfinishedSignatureToId.remove(task.getSignature());
        if (tabletId == null || tabletId != task.getTabletId()) {
            LOG.error("invalid dir move task: {}. {}", task, this);
            return false;
        }

        taskErrMsg.remove(task.getSignature());
        return true;
    }

    /**
     * Validates a task-finish report.
     *
     * @return true if the task FAILED (its error message is recorded); false if it succeeded
     */
    private boolean checkTaskStatus(AgentTask task, long jobId, TFinishTaskRequest request) {
        Preconditions.checkState(jobId == this.jobId);
        Preconditions.checkState(dbId == task.getDbId());

        if (request.getTask_status().getStatus_code() != TStatusCode.OK) {
            taskErrMsg.put(task.getSignature(), Joiner.on(",").join(request.getTask_status().getError_msgs()));
            return true;
        }
        return false;
    }

    @Override
    public synchronized void replayRun() {
        LOG.info("replay run restore job: {}", this);
        switch (state) {
            case DOWNLOAD:
                replayCheckAndPrepareMeta();
                break;
            case FINISHED:
                replayWaitingAllTabletsCommitted();
                break;
            default:
                break;
        }
    }

    @Override
    public synchronized void replayCancel() {
        cancelInternal(true /* is replay */);
    }

    @Override
    public boolean isPending() {
        return state == RestoreJobState.PENDING;
    }

    @Override
    public boolean isCancelled() {
        return state == RestoreJobState.CANCELLED;
    }

    public void setRepo(Repository repo) {
        this.repo = repo;
    }

    /**
     * Main state-machine driver, invoked periodically by the backup handler.
     * Enforces the job timeout, lazily resolves the repository, and dispatches
     * to the handler for the current state. Any non-OK status cancels the job.
     */
    @Override
    public void run() {
        if (state == RestoreJobState.FINISHED || state == RestoreJobState.CANCELLED) {
            return;
        }

        if (System.currentTimeMillis() - createTime > timeoutMs) {
            status = new Status(ErrCode.TIMEOUT, "restore job with label: " + label + "  timeout.");
            cancelInternal(false);
            return;
        }

        if (repo == null) {
            repo = globalStateMgr.getBackupHandler().getRepoMgr().getRepo(repoId);
            if (repo == null) {
                status = new Status(ErrCode.COMMON_ERROR, "failed to get repository: " + repoId);
                cancelInternal(false);
                return;
            }
        }

        LOG.info("run restore job: {}", this);

        // cancel the job if any object it is restoring has been dropped concurrently
        checkIfNeedCancel();

        if (status.ok()) {
            switch (state) {
                case PENDING:
                    checkAndPrepareMeta();
                    break;
                case SNAPSHOTING:
                    waitingAllSnapshotsFinished();
                    break;
                case DOWNLOAD:
                    downloadSnapshots();
                    break;
                case DOWNLOADING:
                    waitingAllDownloadFinished();
                    break;
                case COMMIT:
                    commit();
                    break;
                case COMMITTING:
                    waitingAllTabletsCommitted();
                    break;
                default:
                    break;
            }
        }

        if (!status.ok()) {
cancelInternal(false); } } /** * return true if some restored objs have been dropped. */ private void checkIfNeedCancel() { if (state == RestoreJobState.PENDING) { return; } Database db = globalStateMgr.getDb(dbId); if (db == null) { status = new Status(ErrCode.NOT_FOUND, "database " + dbId + " has been dropped"); return; } Locker locker = new Locker(); locker.lockDatabase(db, LockType.READ); try { for (IdChain idChain : fileMapping.getMapping().keySet()) { OlapTable tbl = (OlapTable) db.getTable(idChain.getTblId()); if (tbl == null) { status = new Status(ErrCode.NOT_FOUND, "table " + idChain.getTblId() + " has been dropped"); return; } PhysicalPartition part = tbl.getPhysicalPartition(idChain.getPartId()); if (part == null) { status = new Status(ErrCode.NOT_FOUND, "partition " + idChain.getPartId() + " has been dropped"); return; } MaterializedIndex index = part.getIndex(idChain.getIdxId()); if (index == null) { status = new Status(ErrCode.NOT_FOUND, "index " + idChain.getIdxId() + " has been dropped"); return; } } } finally { locker.unLockDatabase(db, LockType.READ); } } /** * Restore rules as follow: * A. Table already exist * A1. Partition already exist, generate file mapping * A2. Partition does not exist, add restored partition to the table. * Reset all index/tablet/replica id, and create replica on BE outside the db lock. * B. Table does not exist * B1. Add table to the db, reset all table/index/tablet/replica id, * and create replica on BE outside the db lock. * <p> * All newly created table/partition/index/tablet/replica should be saved for rolling back. * <p> * Step: * 1. download and deserialize backup meta from repository. * 2. set all existing restored table's state to RESTORE. * 3. check if the expected restore objs are valid. * 4. create replicas if necessary. * 5. add restored objs to globalStateMgr. * 6. make snapshot for all replicas for incremental download later. 
*/
    protected Status resetTableForRestore(OlapTable remoteOlapTbl, Database db) {
        // re-assign all ids (table/partition/index/tablet/replica) of the table
        // deserialized from the backup meta so they do not clash with local ids
        return remoteOlapTbl.resetIdsForRestore(globalStateMgr, db, restoreReplicationNum, mvRestoreContext);
    }

    /**
     * Sends the queued CreateReplicaTasks to the BEs and blocks (bounded) until all
     * replicas are created. On timeout/failure, sets {@code status} with up to 10 of
     * the unfinished (backendId, tabletId) marks.
     */
    protected void sendCreateReplicaTasks() {
        if (batchTask.getTaskNum() > 0) {
            MarkedCountDownLatch<Long, Long> latch = new MarkedCountDownLatch<Long, Long>(batchTask.getTaskNum());
            for (AgentTask task : batchTask.getAllTasks()) {
                latch.addMark(task.getBackendId(), task.getTabletId());
                ((CreateReplicaTask) task).setLatch(latch);
                AgentTaskQueue.addTask(task);
            }
            AgentTaskExecutor.submit(batchTask);

            // scale the wait with the task count, but cap it at 10 minutes
            long timeout = Config.tablet_create_timeout_second * 1000L * batchTask.getTaskNum();
            timeout = Math.min(10L * 60L * 1000L, timeout);
            boolean ok = false;
            try {
                LOG.info("begin to send create replica tasks to BE for restore. total {} tasks. timeout: {}",
                        batchTask.getTaskNum(), timeout);
                ok = latch.await(timeout, TimeUnit.MILLISECONDS);
            } catch (InterruptedException e) {
                // NOTE(review): interrupt status is swallowed here (thread not re-interrupted);
                // treated the same as a timeout below — confirm this is intentional
                LOG.warn("InterruptedException: ", e);
            }

            if (ok) {
                LOG.debug("finished to create all restored replcias. {}", this);
                return;
            }

            List<Entry<Long, Long>> unfinishedMarks = latch.getLeftMarks();
            List<Entry<Long, Long>> subList = unfinishedMarks.subList(0, Math.min(unfinishedMarks.size(), 10));
            String idStr = Joiner.on(", ").join(subList);
            status = new Status(ErrCode.COMMON_ERROR,
                    "Failed to create replicas for restore. unfinished marks: " + idStr);
        }
    }

    /** Registers the newly restored partitions and tables in the database under the db WRITE lock. */
    protected void addRestorePartitionsAndTables(Database db) {
        Locker locker = new Locker();
        locker.lockDatabase(db, LockType.WRITE);
        try {
            addRestoredPartitions(db, false);
            for (Table tbl : restoredTbls) {
                if (!db.registerTableUnlocked(tbl)) {
                    status = new Status(ErrCode.COMMON_ERROR, "Table " + tbl.getName()
                            + " already exist in db: " + db.getOriginName());
                    return;
                }
            }
        } finally {
            locker.unLockDatabase(db, LockType.WRITE);
        }
    }

    /**
     * Builds and sends one SnapshotTask per restored replica so the BEs snapshot the
     * current tablet state (used later for incremental download). Also checks the
     * involved BE disks are not over the capacity limit before dispatching.
     */
    protected void prepareAndSendSnapshotTasks(Database db) {
        unfinishedSignatureToId.clear();
        taskProgress.clear();
        taskErrMsg.clear();
        skipRestoreRemoteTableIds.clear();
        Multimap<Long, Long> bePathsMap = HashMultimap.create();
        batchTask = new AgentBatchTask();
        Locker locker = new Locker();
        locker.lockDatabase(db, LockType.READ);
        try {
            for (IdChain idChain : fileMapping.getMapping().keySet()) {
                OlapTable tbl = (OlapTable) db.getTable(idChain.getTblId());
                PhysicalPartition part = tbl.getPhysicalPartition(idChain.getPartId());
                MaterializedIndex index = part.getIndex(idChain.getIdxId());
                LocalTablet tablet = (LocalTablet) index.getTablet(idChain.getTabletId());
                Replica replica = tablet.getReplicaById(idChain.getReplicaId());
                long signature = globalStateMgr.getNextId();
                SnapshotTask task = new SnapshotTask(null, replica.getBackendId(), signature,
                        jobId, db.getId(),
                        tbl.getId(), part.getId(), index.getId(), tablet.getId(),
                        part.getVisibleVersion(),
                        tbl.getSchemaHashByIndexId(index.getId()), timeoutMs,
                        true /* is restore task*/);
                batchTask.addTask(task);
                unfinishedSignatureToId.put(signature, tablet.getId());
                bePathsMap.put(replica.getBackendId(), replica.getPathHash());
            }
        } finally {
            locker.unLockDatabase(db, LockType.READ);
        }

        // fail fast if any target disk would exceed its capacity limit
        com.starrocks.common.Status st =
                GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo()
                        .checkExceedDiskCapacityLimit(bePathsMap, true);
        if (!st.ok()) {
            status = new Status(ErrCode.COMMON_ERROR, st.getErrorMsg());
            return;
        }

        for (AgentTask task : batchTask.getAllTasks()) {
AgentTaskQueue.addTask(task);
        }
        AgentTaskExecutor.submit(batchTask);
    }

    /**
     * For a partition that already exists locally with identical replica layout,
     * records the file mapping and the version to roll the partition to on commit.
     *
     * @return true if the replication numbers mismatch (an error status was set)
     */
    private boolean genFileMappingWhenBackupReplicasEqual(PartitionInfo localPartInfo, Partition localPartition,
                                                          Table localTbl, BackupPartitionInfo backupPartInfo,
                                                          BackupTableInfo tblInfo) {
        if (localPartInfo.getReplicationNum(localPartition.getId()) != restoreReplicationNum) {
            status = new Status(ErrCode.COMMON_ERROR, "Partition " + backupPartInfo.name
                    + " in table " + localTbl.getName()
                    + " has different replication num '"
                    + localPartInfo.getReplicationNum(localPartition.getId())
                    + "' with partition in repository, which is " + restoreReplicationNum);
            return true;
        }

        OlapTable localOlapTbl = (OlapTable) localTbl;
        genFileMapping(localOlapTbl, localPartition, tblInfo.id, backupPartInfo,
                true /* overwrite when commit */);
        restoredVersionInfo.put(localOlapTbl.getId(), localPartition.getId(), backupPartInfo.version);
        return false;
    }

    /**
     * Registers every tablet/replica of a freshly restored partition in the inverted
     * index and queues a CreateReplicaTask per replica into {@code batchTask}.
     */
    protected void createReplicas(OlapTable localTbl, Partition restorePart) {
        Set<ColumnId> bfColumns = localTbl.getBfColumnIds();
        double bfFpp = localTbl.getBfFpp();

        for (PhysicalPartition physicalPartition : restorePart.getSubPartitions()) {
            for (MaterializedIndex restoredIdx : physicalPartition.getMaterializedIndices(IndexExtState.VISIBLE)) {
                MaterializedIndexMeta indexMeta = localTbl.getIndexMetaByIndexId(restoredIdx.getId());
                TabletMeta tabletMeta = new TabletMeta(dbId, localTbl.getId(), physicalPartition.getId(),
                        restoredIdx.getId(), indexMeta.getSchemaHash(), TStorageMedium.HDD);
                TTabletSchema tabletSchema = SchemaInfo.newBuilder()
                        .setId(indexMeta.getSchemaId())
                        .setKeysType(indexMeta.getKeysType())
                        .setShortKeyColumnCount(indexMeta.getShortKeyColumnCount())
                        .setSchemaHash(indexMeta.getSchemaHash())
                        .setSortKeyIndexes(indexMeta.getSortKeyIdxes())
                        .setSortKeyUniqueIds(indexMeta.getSortKeyUniqueIds())
                        .setStorageType(indexMeta.getStorageType())
                        .addColumns(indexMeta.getSchema())
                        .setBloomFilterColumnNames(bfColumns)
                        .setBloomFilterFpp(bfFpp)
                        .setIndexes(localTbl.getCopiedIndexes())
                        .build().toTabletSchema();
                for (Tablet restoreTablet : restoredIdx.getTablets()) {
                    GlobalStateMgr.getCurrentState().getTabletInvertedIndex()
                            .addTablet(restoreTablet.getId(), tabletMeta);
                    for (Replica restoreReplica : ((LocalTablet) restoreTablet).getImmutableReplicas()) {
                        GlobalStateMgr.getCurrentState().getTabletInvertedIndex()
                                .addReplica(restoreTablet.getId(), restoreReplica);
                        LOG.info("tablet {} physical partition {} index {} replica {}",
                                restoreTablet.getId(), physicalPartition.getId(), restoredIdx.getId(),
                                restoreReplica.getId());
                        CreateReplicaTask task = CreateReplicaTask.newBuilder()
                                .setNodeId(restoreReplica.getBackendId())
                                .setDbId(dbId)
                                .setTableId(localTbl.getId())
                                .setPartitionId(physicalPartition.getId())
                                .setIndexId(restoredIdx.getId())
                                .setTabletId(restoreTablet.getId())
                                .setVersion(restoreReplica.getVersion())
                                .setStorageMedium(TStorageMedium.HDD) /* all restored replicas will be saved to HDD */
                                .setEnablePersistentIndex(localTbl.enablePersistentIndex())
                                .setPrimaryIndexCacheExpireSec(localTbl.primaryIndexCacheExpireSec())
                                .setTabletType(localTbl.getPartitionInfo().getTabletType(restorePart.getId()))
                                .setCompressionType(localTbl.getCompressionType())
                                .setCompressionLevel(localTbl.getCompressionLevel())
                                .setInRestoreMode(true)
                                .setTabletSchema(tabletSchema)
                                .build();
                        batchTask.addTask(task);
                    }
                }
            }
        }
    }

    /**
     * Rewrites the ids of a partition taken from the backup meta so it can be attached
     * to the local table: new partition/physical-partition ids, local index ids, and
     * freshly created tablets. Returns null (with {@code status} set) on failure.
     */
    public Partition resetPartitionForRestore(OlapTable localTbl, OlapTable remoteTbl, String partName,
                                              int restoreReplicationNum) {
        Preconditions.checkState(localTbl.getPartition(partName) == null);
        Partition remotePart = remoteTbl.getPartition(partName);
        Preconditions.checkNotNull(remotePart);
        PartitionInfo localPartitionInfo = localTbl.getPartitionInfo();
        Preconditions.checkState(localPartitionInfo.isRangePartition());

        // assign a fresh local id to the remote partition
        long newPartId = globalStateMgr.getNextId();
        remotePart.setIdForRestore(newPartId);

        // map each remote index id onto the matching local index id (by index name)
        Map<String, Long> localIdxNameToId = localTbl.getIndexNameToId();
        for (String localIdxName :
localIdxNameToId.keySet()) {
            // remote index name -> local index id
            long remoteIdxId = remoteTbl.getIndexIdByName(localIdxName);
            MaterializedIndex remoteIdx = remotePart.getIndex(remoteIdxId);
            long localIdxId = localIdxNameToId.get(localIdxName);
            remoteIdx.setIdForRestore(localIdxId);
            if (localIdxId != localTbl.getBaseIndexId()) {
                // rollup indexes are keyed by id inside the partition; re-register under the new id
                remotePart.deleteRollupIndex(remoteIdxId);
                remotePart.createRollupIndex(remoteIdx);
            }
        }

        for (PhysicalPartition physicalPartition : remotePart.getSubPartitions()) {
            if (physicalPartition.getId() != newPartId) {
                // sub-partitions also need fresh local ids and re-registration
                remotePart.removeSubPartition(physicalPartition.getId());
                long newPhysicalPartId = globalStateMgr.getNextId();
                physicalPartition.setIdForRestore(newPhysicalPartId);
                physicalPartition.setParentId(newPartId);
                remotePart.addSubPartition(physicalPartition);
            }
            long visibleVersion = physicalPartition.getVisibleVersion();
            for (MaterializedIndex remoteIdx : physicalPartition.getMaterializedIndices(IndexExtState.VISIBLE)) {
                int schemaHash = remoteTbl.getSchemaHashByIndexId(remoteIdx.getId());
                int remotetabletSize = remoteIdx.getTablets().size();
                // drop the remote tablets and create the same number of local ones
                remoteIdx.clearTabletsForRestore();
                status = remoteTbl.createTabletsForRestore(remotetabletSize, remoteIdx, globalStateMgr,
                        restoreReplicationNum, visibleVersion, schemaHash, physicalPartition.getId(),
                        physicalPartition.getShardGroupId(), null);
                if (!status.ok()) {
                    return null;
                }
            }
        }
        return remotePart;
    }

    /**
     * Records, for every local replica of the partition, which repository file it must
     * be restored from. Dispatches on whether the partition has sub-partitions.
     */
    protected void genFileMapping(OlapTable localTbl, Partition localPartition, Long remoteTblId,
                                  BackupPartitionInfo backupPartInfo, boolean overwrite) {
        if (localPartition.getSubPartitions().size() > 1) {
            genFileMappingWithSubPartition(localTbl, localPartition, remoteTblId, backupPartInfo, overwrite);
        } else {
            genFileMappingWithPartition(localTbl, localPartition, remoteTblId, backupPartInfo, overwrite);
        }
    }

    protected void genFileMappingWithPartition(OlapTable localTbl, Partition localPartition, Long remoteTblId,
                                               BackupPartitionInfo backupPartInfo, boolean overwrite) {
        for (MaterializedIndex localIdx : localPartition.getMaterializedIndices(IndexExtState.VISIBLE)) {
            // tablets are matched positionally, so counts must be equal
            BackupIndexInfo backupIdxInfo = backupPartInfo.getIdx(localTbl.getIndexNameById(localIdx.getId()));
            Preconditions.checkState(backupIdxInfo.tablets.size() == localIdx.getTablets().size());
            for (int i = 0; i < localIdx.getTablets().size(); i++) {
                LocalTablet localTablet = (LocalTablet) localIdx.getTablets().get(i);
                BackupTabletInfo backupTabletInfo = backupIdxInfo.tablets.get(i);
                for (Replica localReplica : localTablet.getImmutableReplicas()) {
                    IdChain src = new IdChain(remoteTblId, backupPartInfo.id, backupIdxInfo.id,
                            backupTabletInfo.id, -1L /* no replica id */);
                    IdChain dest = new IdChain(localTbl.getId(), localPartition.getId(),
                            localIdx.getId(), localTablet.getId(), localReplica.getId());
                    fileMapping.putMapping(dest, src, overwrite);
                    LOG.debug("tablet mapping: {} to {} file mapping: {} to {}",
                            backupTabletInfo.id, localTablet.getId(), src, dest);
                }
            }
        }
    }

    protected void genFileMappingWithSubPartition(OlapTable localTbl, Partition localPartition, Long remoteTblId,
                                                  BackupPartitionInfo backupPartInfo, boolean overwrite) {
        for (PhysicalPartition physicalPartition : localPartition.getSubPartitions()) {
            // sub-partitions are matched via the id the physical partition had before restore
            BackupPhysicalPartitionInfo physicalPartitionInfo = backupPartInfo.subPartitions.get(
                    physicalPartition.getBeforeRestoreId());
            for (MaterializedIndex localIdx : physicalPartition.getMaterializedIndices(IndexExtState.VISIBLE)) {
                BackupIndexInfo backupIdxInfo =
                        physicalPartitionInfo.getIdx(localTbl.getIndexNameById(localIdx.getId()));
                Preconditions.checkState(backupIdxInfo.tablets.size() == localIdx.getTablets().size());
                for (int i = 0; i < localIdx.getTablets().size(); i++) {
                    LocalTablet localTablet = (LocalTablet) localIdx.getTablets().get(i);
                    BackupTabletInfo backupTabletInfo = backupIdxInfo.tablets.get(i);
                    for (Replica localReplica : localTablet.getImmutableReplicas()) {
                        IdChain src = new IdChain(remoteTblId, physicalPartitionInfo.id, backupIdxInfo.id,
                                backupTabletInfo.id, -1L /* no replica id */);
IdChain dest = new IdChain(localTbl.getId(), physicalPartition.getId(),
                                localIdx.getId(), localTablet.getId(), localReplica.getId());
                        fileMapping.putMapping(dest, src, overwrite);
                        LOG.debug("tablet mapping: {} to {} file mapping: {} to {}",
                                backupTabletInfo.id, localTablet.getId(), src, dest);
                    }
                }
            }
        }
    }

    /**
     * Follower-side replay of the DOWNLOAD edit-log entry: puts the target tables into
     * RESTORE state and registers the restored partitions/tables (including their
     * inverted-index entries) without dispatching any BE tasks.
     */
    private void replayCheckAndPrepareMeta() {
        Database db = globalStateMgr.getDb(dbId);
        Locker locker = new Locker();
        locker.lockDatabase(db, LockType.WRITE);
        try {
            for (BackupTableInfo tblInfo : jobInfo.tables.values()) {
                Table tbl = db.getTable(jobInfo.getAliasByOriginNameIfSet(tblInfo.name));
                if (tbl == null) {
                    continue;
                }
                OlapTable olapTbl = (OlapTable) tbl;
                olapTbl.setState(OlapTableState.RESTORE);
            }

            addRestoredPartitions(db, true);

            for (Table restoreTbl : restoredTbls) {
                db.registerTableUnlocked(restoreTbl);
                for (Partition restorePart : restoreTbl.getPartitions()) {
                    modifyInvertedIndex((OlapTable) restoreTbl, restorePart);
                }
            }
        } finally {
            locker.unLockDatabase(db, LockType.WRITE);
        }

        LOG.info("replay check and prepare meta. {}", this);
    }

    /**
     * Attaches each restored partition to its (existing) local table, copying range and
     * data-property metadata from the backup. When {@code modify} is true the tablets
     * are also registered in the inverted index (replay path).
     */
    protected void addRestoredPartitions(Database db, boolean modify) {
        for (Pair<String, Partition> entry : restoredPartitions) {
            OlapTable localTbl = (OlapTable) db.getTable(entry.first);
            Partition restorePart = entry.second;
            OlapTable remoteTbl = (OlapTable) backupMeta.getTable(entry.first);
            RangePartitionInfo localPartitionInfo = (RangePartitionInfo) localTbl.getPartitionInfo();
            RangePartitionInfo remotePartitionInfo = (RangePartitionInfo) remoteTbl.getPartitionInfo();
            BackupPartitionInfo backupPartitionInfo =
                    jobInfo.getTableInfo(entry.first).getPartInfo(restorePart.getName());
            long remotePartId = backupPartitionInfo.id;
            Range<PartitionKey> remoteRange = remotePartitionInfo.getRange(remotePartId);
            DataProperty remoteDataProperty = remotePartitionInfo.getDataProperty(remotePartId);
            localPartitionInfo.addPartition(restorePart.getId(), false, remoteRange,
                    remoteDataProperty, (short) restoreReplicationNum,
                    remotePartitionInfo.getIsInMemory(remotePartId));
            localTbl.addPartition(restorePart);
            if (modify) {
                modifyInvertedIndex(localTbl, restorePart);
            }
        }
    }

    /** Registers all tablets and replicas of a restored partition in the tablet inverted index. */
    protected void modifyInvertedIndex(OlapTable restoreTbl, Partition restorePart) {
        for (MaterializedIndex restoreIdx : restorePart.getMaterializedIndices(IndexExtState.VISIBLE)) {
            int schemaHash = restoreTbl.getSchemaHashByIndexId(restoreIdx.getId());
            TabletMeta tabletMeta = new TabletMeta(dbId, restoreTbl.getId(), restorePart.getId(),
                    restoreIdx.getId(), schemaHash, TStorageMedium.HDD);
            for (Tablet restoreTablet : restoreIdx.getTablets()) {
                GlobalStateMgr.getCurrentState().getTabletInvertedIndex()
                        .addTablet(restoreTablet.getId(), tabletMeta);
                for (Replica restoreReplica : ((LocalTablet) restoreTablet).getImmutableReplicas()) {
                    GlobalStateMgr.getCurrentState().getTabletInvertedIndex()
                            .addReplica(restoreTablet.getId(), restoreReplica);
                }
            }
        }
    }

    /**
     * SNAPSHOTING phase: once all BE snapshot tasks have reported back, advances to
     * DOWNLOAD and persists the transition (plus any colocate info) to the edit log.
     */
    private void waitingAllSnapshotsFinished() {
        if (unfinishedSignatureToId.isEmpty()) {
            snapshotFinishedTime = System.currentTimeMillis();
            state =
RestoreJobState.DOWNLOAD; globalStateMgr.getEditLog().logRestoreJob(this); for (ColocatePersistInfo colocatePersistInfo : colocatePersistInfos) { GlobalStateMgr.getCurrentState().getEditLog().logColocateAddTable(colocatePersistInfo); } LOG.info("finished making snapshots. {}", this); return; } LOG.info("waiting {} replicas to make snapshot: [{}]. {}", unfinishedSignatureToId.size(), unfinishedSignatureToId, this); return; } private void downloadSnapshots() { ArrayListMultimap<Long, SnapshotInfo> dbToSnapshotInfos = ArrayListMultimap.create(); for (SnapshotInfo info : snapshotInfos.values()) { dbToSnapshotInfos.put(info.getDbId(), info); } unfinishedSignatureToId.clear(); taskProgress.clear(); taskErrMsg.clear(); skipRestoreRemoteTableIds.clear(); batchTask = new AgentBatchTask(); for (long dbId : dbToSnapshotInfos.keySet()) { List<SnapshotInfo> infos = dbToSnapshotInfos.get(dbId); Database db = globalStateMgr.getDb(dbId); if (db == null) { status = new Status(ErrCode.NOT_FOUND, "db " + dbId + " does not exist"); return; } ArrayListMultimap<Long, SnapshotInfo> beToSnapshots = ArrayListMultimap.create(); for (SnapshotInfo info : infos) { beToSnapshots.put(info.getBeId(), info); } Locker locker = new Locker(); locker.lockDatabase(db, LockType.READ); try { for (Long beId : beToSnapshots.keySet()) { List<SnapshotInfo> beSnapshotInfos = beToSnapshots.get(beId); List<FsBroker> brokerAddrs = Lists.newArrayList(); THdfsProperties hdfsProperties = new THdfsProperties(); if (repo.getStorage().hasBroker()) { Status st = repo.getBrokerAddress(beId, globalStateMgr, brokerAddrs); if (!st.ok()) { status = st; return; } Preconditions.checkState(brokerAddrs.size() == 1); } else { BrokerDesc brokerDesc = new BrokerDesc(repo.getStorage().getProperties()); try { HdfsUtil.getTProperties(repo.getLocation(), brokerDesc, hdfsProperties); } catch (UserException e) { status = new Status(ErrCode.COMMON_ERROR, "Get properties from " + repo.getLocation() + " error."); return; } } 
prepareDownloadTasks(beSnapshotInfos, db, beId, brokerAddrs, hdfsProperties); if (!status.ok()) { return; } } } finally { locker.unLockDatabase(db, LockType.READ); } } sendDownloadTasks(); state = RestoreJobState.DOWNLOADING; LOG.info("finished to send download tasks to BE. num: {}. {}", batchTask.getTaskNum(), this); return; } protected void waitingAllDownloadFinished() { if (unfinishedSignatureToId.isEmpty()) { downloadFinishedTime = System.currentTimeMillis(); state = RestoreJobState.COMMIT; backupMeta = null; globalStateMgr.getEditLog().logRestoreJob(this); LOG.info("finished to download. {}", this); } LOG.info("waiting {} tasks to finish downloading from repo. {}", unfinishedSignatureToId.size(), this); } protected void prepareDownloadTasks(List<SnapshotInfo> beSnapshotInfos, Database db, long beId, List<FsBroker> brokerAddrs, THdfsProperties hdfsProperties) { int totalNum = beSnapshotInfos.size(); int batchNum = totalNum; if (Config.max_download_task_per_be > 0) { batchNum = Math.min(totalNum, Config.max_download_task_per_be); } int taskNumPerBatch = Math.max(totalNum / batchNum, 1); LOG.debug("backend {} has {} batch, total {} tasks, {}", beId, batchNum, totalNum, this); int index = 0; for (int batch = 0; batch < batchNum; batch++) { Map<String, String> srcToDest = Maps.newHashMap(); int currentBatchTaskNum = (batch == batchNum - 1) ? 
totalNum - index : taskNumPerBatch; for (int j = 0; j < currentBatchTaskNum; j++) { SnapshotInfo info = beSnapshotInfos.get(index++); Table tbl = db.getTable(info.getTblId()); if (tbl == null) { status = new Status(ErrCode.NOT_FOUND, "restored table " + info.getTabletId() + " does not exist"); return; } OlapTable olapTbl = (OlapTable) tbl; PhysicalPartition part = olapTbl.getPhysicalPartition(info.getPartitionId()); if (part == null) { status = new Status(ErrCode.NOT_FOUND, "partition " + info.getPartitionId() + " does not exist in restored table: " + tbl.getName()); return; } MaterializedIndex idx = part.getIndex(info.getIndexId()); if (idx == null) { status = new Status(ErrCode.NOT_FOUND, "index " + info.getIndexId() + " does not exist in partion " + part.getId() + "of restored table " + tbl.getName()); return; } LocalTablet tablet = (LocalTablet) idx.getTablet(info.getTabletId()); if (tablet == null) { status = new Status(ErrCode.NOT_FOUND, "tablet " + info.getTabletId() + " does not exist in restored table " + tbl.getName()); return; } Replica replica = tablet.getReplicaByBackendId(info.getBeId()); if (replica == null) { status = new Status(ErrCode.NOT_FOUND, "replica in be " + info.getBeId() + " of tablet " + tablet.getId() + " does not exist in restored table " + tbl.getName()); return; } IdChain catalogIds = new IdChain(tbl.getId(), part.getId(), idx.getId(), info.getTabletId(), replica.getId()); IdChain repoIds = fileMapping.get(catalogIds); if (repoIds == null) { status = new Status(ErrCode.NOT_FOUND, "failed to get id mapping of globalStateMgr ids: " + catalogIds.toString()); LOG.info("current file mapping: {}", fileMapping); return; } String repoTabletPath = jobInfo.getFilePath(repoIds); String src = repo.getRepoPath(label, repoTabletPath); SnapshotInfo snapshotInfo = snapshotInfos.get(info.getTabletId(), info.getBeId()); Preconditions.checkNotNull(snapshotInfo, info.getTabletId() + "-" + info.getBeId()); String dest = snapshotInfo.getTabletPath(); 
srcToDest.put(src, dest);
                LOG.debug("catalog id: {}, repo id: {}, repoTabletPath: {}, src: {}, dest: {}",
                        catalogIds, repoIds, repoTabletPath, src, dest);
            }
            long signature = globalStateMgr.getNextId();
            DownloadTask task;
            if (repo.getStorage().hasBroker()) {
                // broker-mediated download
                task = new DownloadTask(null, beId, signature, jobId, dbId, srcToDest,
                        brokerAddrs.get(0), repo.getStorage().getProperties());
            } else {
                // direct download using the resolved HDFS properties
                task = new DownloadTask(null, beId, signature, jobId, dbId, srcToDest,
                        null, repo.getStorage().getProperties(), hdfsProperties);
            }
            batchTask.addTask(task);
            unfinishedSignatureToId.put(signature, beId);
        }
    }

    /** Queues and submits all prepared download tasks. */
    protected void sendDownloadTasks() {
        for (AgentTask task : batchTask.getAllTasks()) {
            AgentTaskQueue.addTask(task);
        }
        AgentTaskExecutor.submit(batchTask);
    }

    /**
     * COMMIT phase entry: clears per-phase bookkeeping, sends the dir-move tasks that
     * atomically swap the downloaded data into place, and advances to COMMITTING.
     */
    private void commit() {
        unfinishedSignatureToId.clear();
        taskProgress.clear();
        taskErrMsg.clear();
        skipRestoreRemoteTableIds.clear();

        prepareAndSendDirMoveTasks();

        state = RestoreJobState.COMMITTING;
    }

    /** Builds and dispatches one DirMoveTask per (tablet, backend) snapshot. */
    protected void prepareAndSendDirMoveTasks() {
        batchTask = new AgentBatchTask();
        for (Cell<Long, Long, SnapshotInfo> cell : snapshotInfos.cellSet()) {
            SnapshotInfo info = cell.getValue();
            long signature = globalStateMgr.getNextId();
            DirMoveTask task = new DirMoveTask(null, cell.getColumnKey(), signature, jobId, dbId,
                    info.getTblId(), info.getPartitionId(), info.getTabletId(), cell.getRowKey(),
                    info.getTabletPath(), info.getSchemaHash(),
                    true /* need reload tablet header */);
            batchTask.addTask(task);
            unfinishedSignatureToId.put(signature, info.getTabletId());
        }

        for (AgentTask task : batchTask.getAllTasks()) {
            AgentTaskQueue.addTask(task);
        }
        AgentTaskExecutor.submit(batchTask);

        LOG.info("finished to send move dir tasks. num: {}. {}", batchTask.getTaskNum(), this);
    }

    /**
     * COMMITTING phase: once all dir-move tasks have reported back, finalizes the job
     * and decrements the unfinished-restore-job metric.
     */
    protected void waitingAllTabletsCommitted() {
        if (unfinishedSignatureToId.isEmpty()) {
            LOG.info("finished to commit all tablet. {}", this);
            Status st = allTabletCommitted(false /* not replay */);
            if (!st.ok()) {
                status = st;
            }
            MetricRepo.COUNTER_UNFINISHED_RESTORE_JOB.increase(-1L);
            return;
        }
        LOG.info("waiting {} tablets to commit. {}", unfinishedSignatureToId.size(), this);
    }

    /**
     * Finalizes the restore: resets table states to NORMAL, rolls the restored
     * partitions to their backup versions, and (on the non-replay path) marks the job
     * FINISHED, persists it, releases BE snapshots and runs per-table post actions.
     */
    private Status allTabletCommitted(boolean isReplay) {
        Database db = globalStateMgr.getDb(dbId);
        if (db == null) {
            return new Status(ErrCode.NOT_FOUND, "database " + dbId + " does not exist");
        }

        // set all restored partition version
        Locker locker = new Locker();
        locker.lockDatabase(db, LockType.WRITE);
        try {
            // set all tables' state to NORMAL
            setTableStateToNormal(db);

            for (long tblId : restoredVersionInfo.rowKeySet()) {
                Table tbl = db.getTable(tblId);
                if (tbl == null) {
                    continue;
                }
                OlapTable olapTbl = (OlapTable) tbl;
                Map<Long, Long> map = restoredVersionInfo.rowMap().get(tblId);
                for (Map.Entry<Long, Long> entry : map.entrySet()) {
                    long partId = entry.getKey();
                    PhysicalPartition part = olapTbl.getPhysicalPartition(partId);
                    if (part == null) {
                        continue;
                    }

                    part.updateVersionForRestore(entry.getValue());

                    // we also need to update the replica version of these overwritten restored partitions
                    for (MaterializedIndex idx : part.getMaterializedIndices(IndexExtState.VISIBLE)) {
                        updateTablets(idx, part);
                    }

                    LOG.debug("restore set partition {} version in table {}, version: {}",
                            partId, tblId, entry.getValue());
                }
            }
        } finally {
            locker.unLockDatabase(db, LockType.WRITE);
        }

        if (!isReplay) {
            restoredPartitions.clear();
            restoredTbls.clear();

            // release snapshot before clearing snapshotInfos
            releaseSnapshots();

            snapshotInfos.clear();

            finishedTime = System.currentTimeMillis();
            state = RestoreJobState.FINISHED;

            globalStateMgr.getEditLog().logRestoreJob(this);

            locker.lockDatabase(db, LockType.READ);
            try {
                for (BackupTableInfo tblInfo : jobInfo.tables.values()) {
                    Table tbl = db.getTable(jobInfo.getAliasByOriginNameIfSet(tblInfo.name));
                    if (skipRestoreRemoteTableIds.contains(tblInfo.id)) {
                        continue;
                    }
                    if (tbl == null) {
                        // FIX: the original used a String.format-style "%s" placeholder
                        // with Log4j parameterized logging, so the table name was never
                        // substituted into the message. Use "{}" (and fix the grammar).
                        LOG.warn("skip post actions after restore success, table does not exist: {}",
                                tblInfo.name);
                        continue;
                    }
                    if (!tbl.isNativeTableOrMaterializedView()) {
                        continue;
                    }
                    LOG.info("do post actions for table : {}", tbl.getName());

                    // only for olap table
                    if
(tbl.isOlapTable()) { try { DynamicPartitionUtil.registerOrRemovePartitionScheduleInfo(db.getId(), (OlapTable) tbl); } catch (Exception e) { LOG.warn(String.format("register table %s for dynamic partition scheduler failed: ", tbl.getName()), e); } } if (tbl instanceof OlapTable) { OlapTable olapTable = (OlapTable) tbl; try { olapTable.doAfterRestore(mvRestoreContext); } catch (Exception e) { LOG.warn(String.format("rebuild olap table %s failed: ", olapTable.getName()), e); } } } } catch (Exception e) { LOG.warn("Do post actions after restore success failed: ", e); throw e; } finally { locker.unLockDatabase(db, LockType.READ); } } LOG.info("job is finished. is replay: {}. {}", isReplay, this); return Status.OK; } protected void updateTablets(MaterializedIndex idx, PhysicalPartition part) { for (Tablet tablet : idx.getTablets()) { for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) { replica.updateForRestore(part.getVisibleVersion(), replica.getDataSize(), replica.getRowCount()); replica.setLastReportVersion(part.getVisibleVersion()); } } } protected void releaseSnapshots() { if (snapshotInfos.isEmpty()) { return; } batchTask = new AgentBatchTask(); for (SnapshotInfo info : snapshotInfos.values()) { ReleaseSnapshotTask releaseTask = new ReleaseSnapshotTask(null, info.getBeId(), info.getDbId(), info.getTabletId(), info.getPath()); batchTask.addTask(releaseTask); } AgentTaskExecutor.submit(batchTask); LOG.info("send {} release snapshot tasks, job: {}", snapshotInfos.size(), this); } private void replayWaitingAllTabletsCommitted() { allTabletCommitted(true /* is replay */); } public List<String> getInfo() { List<String> info = Lists.newArrayList(); info.add(String.valueOf(jobId)); info.add(label); info.add(backupTimestamp); info.add(dbName); info.add(state.name()); info.add(String.valueOf(allowLoad)); info.add(String.valueOf(restoreReplicationNum)); info.add(getRestoreObjs()); info.add(TimeUtils.longToTimeString(createTime)); 
info.add(TimeUtils.longToTimeString(metaPreparedTime)); info.add(TimeUtils.longToTimeString(snapshotFinishedTime)); info.add(TimeUtils.longToTimeString(downloadFinishedTime)); info.add(TimeUtils.longToTimeString(finishedTime)); try { info.add(Joiner.on(", ").join(unfinishedSignatureToId.entrySet())); info.add(Joiner.on(", ").join(taskProgress.entrySet().stream().map( e -> "[" + e.getKey() + ": " + e.getValue().first + "/" + e.getValue().second + "]").collect( Collectors.toList()))); info.add(Joiner.on(", ").join(taskErrMsg.entrySet().stream().map(n -> "[" + n.getKey() + ": " + n.getValue() + "]").collect(Collectors.toList()))); } catch (Exception e) { throw new SemanticException("meta data may has been updated during this period, please try again"); } info.add(status.toString()); info.add(String.valueOf(timeoutMs / 1000)); return info; } private String getRestoreObjs() { Preconditions.checkState(jobInfo != null); return jobInfo.getInfo(); } @Override public boolean isDone() { if (state == RestoreJobState.FINISHED || state == RestoreJobState.CANCELLED) { return true; } return false; } @Override public synchronized Status cancel() { if (isDone()) { return new Status(ErrCode.COMMON_ERROR, "Job with label " + label + " can not be cancelled. 
state: " + state); } status = new Status(ErrCode.COMMON_ERROR, "user cancelled, current state: " + state.name()); cancelInternal(false); MetricRepo.COUNTER_UNFINISHED_RESTORE_JOB.increase(-1L); return Status.OK; } public void cancelInternal(boolean isReplay) { if (!isReplay) { switch (state) { case SNAPSHOTING: for (Long taskId : unfinishedSignatureToId.keySet()) { AgentTaskQueue.removeTaskOfType(TTaskType.MAKE_SNAPSHOT, taskId); } break; case DOWNLOADING: for (Long taskId : unfinishedSignatureToId.keySet()) { AgentTaskQueue.removeTaskOfType(TTaskType.DOWNLOAD, taskId); } break; case COMMITTING: for (Long taskId : unfinishedSignatureToId.keySet()) { AgentTaskQueue.removeTaskOfType(TTaskType.MOVE, taskId); } break; default: break; } } Database db = globalStateMgr.getDb(dbId); if (db != null) { Locker locker = new Locker(); locker.lockDatabase(db, LockType.WRITE); try { setTableStateToNormal(db); for (Table restoreTbl : restoredTbls) { LOG.info("remove restored table when cancelled: {}", restoreTbl.getName()); for (Partition part : restoreTbl.getPartitions()) { for (MaterializedIndex idx : part.getMaterializedIndices(IndexExtState.VISIBLE)) { for (Tablet tablet : idx.getTablets()) { GlobalStateMgr.getCurrentState().getTabletInvertedIndex().deleteTablet(tablet.getId()); } } } db.dropTable(restoreTbl.getName()); } for (Pair<String, Partition> entry : restoredPartitions) { OlapTable restoreTbl = (OlapTable) db.getTable(entry.first); if (restoreTbl == null) { continue; } LOG.info("remove restored partition in table {} when cancelled: {}", restoreTbl.getName(), entry.second.getName()); restoreTbl.dropPartition(dbId, entry.second.getName(), true /* is restore */); } } finally { locker.unLockDatabase(db, LockType.WRITE); } } for (ColocatePersistInfo colocatePersistInfo : colocatePersistInfos) { for (Table restoreTbl : restoredTbls) { if (restoreTbl instanceof OlapTable && restoreTbl.getId() == colocatePersistInfo.getTableId()) { 
GlobalStateMgr.getCurrentState().getColocateTableIndex() .removeTable(restoreTbl.getId(), (OlapTable) restoreTbl, isReplay); } } } if (!isReplay) { backupMeta = null; releaseSnapshots(); snapshotInfos.clear(); RestoreJobState curState = state; finishedTime = System.currentTimeMillis(); state = RestoreJobState.CANCELLED; globalStateMgr.getEditLog().logRestoreJob(this); LOG.info("finished to cancel restore job. current state: {}. is replay: {}. {}", curState.name(), isReplay, this); return; } LOG.info("finished to cancel restore job. is replay: {}. {}", isReplay, this); } private void setTableStateToNormal(Database db) { for (BackupTableInfo tblInfo : jobInfo.tables.values()) { if (skipRestoreRemoteTableIds.contains(tblInfo.id)) { continue; } Table tbl = db.getTable(jobInfo.getAliasByOriginNameIfSet(tblInfo.name)); if (tbl == null) { continue; } if (!tbl.isNativeTableOrMaterializedView()) { continue; } OlapTable olapTbl = (OlapTable) tbl; if (olapTbl.getState() == OlapTableState.RESTORE || olapTbl.getState() == OlapTableState.RESTORE_WITH_LOAD) { olapTbl.setState(OlapTableState.NORMAL); } } } public static RestoreJob read(DataInput in) throws IOException { RestoreJob job = new RestoreJob(); job.readFields(in); return job; } @Override public void write(DataOutput out) throws IOException { super.write(out); Text.writeString(out, backupTimestamp); jobInfo.write(out); out.writeBoolean(allowLoad); Text.writeString(out, state.name()); if (backupMeta != null) { out.writeBoolean(true); backupMeta.write(out); } else { out.writeBoolean(false); } fileMapping.write(out); out.writeLong(metaPreparedTime); out.writeLong(snapshotFinishedTime); out.writeLong(downloadFinishedTime); out.writeInt(restoreReplicationNum); out.writeInt(restoredPartitions.size()); for (Pair<String, Partition> entry : restoredPartitions) { Text.writeString(out, entry.first); entry.second.write(out); } out.writeInt(restoredTbls.size()); for (Table tbl : restoredTbls) { tbl.write(out); } 
out.writeInt(restoredVersionInfo.rowKeySet().size()); for (long tblId : restoredVersionInfo.rowKeySet()) { out.writeLong(tblId); out.writeInt(restoredVersionInfo.row(tblId).size()); for (Map.Entry<Long, Long> entry : restoredVersionInfo.row(tblId).entrySet()) { out.writeLong(entry.getKey()); out.writeLong(entry.getValue()); out.writeLong(0); } } out.writeInt(snapshotInfos.rowKeySet().size()); for (long tabletId : snapshotInfos.rowKeySet()) { out.writeLong(tabletId); Map<Long, SnapshotInfo> map = snapshotInfos.row(tabletId); out.writeInt(map.size()); for (Map.Entry<Long, SnapshotInfo> entry : map.entrySet()) { out.writeLong(entry.getKey()); entry.getValue().write(out); } } } @Override public String toString() { StringBuilder sb = new StringBuilder(super.toString()); sb.append(", backup ts: ").append(backupTimestamp); sb.append(", state: ").append(state.name()); return sb.toString(); } }
Since `outstanding` is a `ConcurrentSkipListMap`, its iteration (including `forEach`) is weakly consistent rather than snapshot-based: it reflects the map at some point at or since construction of the iterator, never throws `ConcurrentModificationException`, and may or may not observe concurrent removals. So concurrent removes can't corrupt the traversal — at worst an already-removed entry is still visited. Worth double-checking, though; it's been a while since I've looked into its semantics 🙂
// Acknowledges delivery of a visited document: the token's payload is the
// DocumentId the message was sent with, and acking clears that document from
// the set still awaiting confirmation.
public void ack(AckToken token) {
    DocumentId ackedId = (DocumentId) token.ackObject;
    outstanding.remove(ackedId);
}
}
// Marks the document identified by this token as acknowledged, removing it from
// the outstanding set. The token's ackObject is presumably the DocumentId that
// was handed to the AckToken when the message was delivered — verify at the
// delivery site.
public void ack(AckToken token) { outstanding.remove((DocumentId) token.ackObject); }
class LocalVisitorSession implements VisitorSession { private enum State { RUNNING, FAILURE, ABORTED, SUCCESS } private final VisitorDataHandler data; private final VisitorControlHandler control; private final Map<DocumentId, Document> outstanding; private final DocumentSelector selector; private final FieldSet fieldSet; private final AtomicReference<State> state; public LocalVisitorSession(LocalDocumentAccess access, VisitorParameters parameters) throws ParseException { if (parameters.getResumeToken() != null) throw new UnsupportedOperationException("Continuation via progress tokens is not supported"); if (parameters.getRemoteDataHandler() != null) throw new UnsupportedOperationException("Remote data handlers are not supported"); this.selector = new DocumentSelector(parameters.getDocumentSelection()); this.fieldSet = new FieldSetRepo().parse(access.getDocumentTypeManager(), parameters.fieldSet()); this.data = parameters.getLocalDataHandler() == null ? new VisitorDataQueue() : parameters.getLocalDataHandler(); this.data.reset(); this.data.setSession(this); this.control = parameters.getControlHandler() == null ? 
new VisitorControlHandler() : parameters.getControlHandler(); this.control.reset(); this.control.setSession(this); this.outstanding = new ConcurrentSkipListMap<>(Comparator.comparing(DocumentId::toString)); this.outstanding.putAll(access.documents); this.state = new AtomicReference<>(State.RUNNING); start(); } void start() { new Thread(() -> { try { outstanding.forEach((id, document) -> { if (selector.accepts(new DocumentPut(document)) != Result.TRUE) return; Document copy = new Document(document.getDataType(), document.getId()); for (Field field : document.getDataType().getFields()) if (fieldSet.contains(field)) copy.setFieldValue(field, document.getFieldValue(field)); data.onMessage(new PutDocumentMessage(new DocumentPut(copy)), new AckToken(id)); }); state.updateAndGet(current -> { switch (current) { case RUNNING: control.onDone(VisitorControlHandler.CompletionCode.SUCCESS, "Success"); return State.SUCCESS; case ABORTED: control.onDone(VisitorControlHandler.CompletionCode.ABORTED, "Aborted by user"); return State.ABORTED; default: control.onDone(VisitorControlHandler.CompletionCode.FAILURE, "Unexpected state '" + current + "'");; return State.FAILURE; } }); } catch (Exception e) { state.set(State.FAILURE); outstanding.clear(); control.onDone(VisitorControlHandler.CompletionCode.FAILURE, Exceptions.toMessageString(e)); } finally { data.onDone(); } }).start(); } @Override public boolean isDone() { return outstanding.isEmpty() && control.isDone(); } @Override public ProgressToken getProgress() { throw new UnsupportedOperationException("Progress tokens are not supported"); } @Override public Trace getTrace() { throw new UnsupportedOperationException("Traces are not supported"); } @Override public boolean waitUntilDone(long timeoutMs) throws InterruptedException { return control.waitUntilDone(timeoutMs); } @Override @Override public void abort() { state.updateAndGet(current -> current == State.RUNNING ? 
State.ABORTED : current); outstanding.clear(); } @Override public VisitorResponse getNext() { return data.getNext(); } @Override public VisitorResponse getNext(int timeoutMilliseconds) throws InterruptedException { return data.getNext(timeoutMilliseconds); } @Override public void destroy() { abort(); } }
class LocalVisitorSession implements VisitorSession { private enum State { RUNNING, FAILURE, ABORTED, SUCCESS } private final VisitorDataHandler data; private final VisitorControlHandler control; private final Map<DocumentId, Document> outstanding; private final DocumentSelector selector; private final FieldSet fieldSet; private final AtomicReference<State> state; public LocalVisitorSession(LocalDocumentAccess access, VisitorParameters parameters) throws ParseException { if (parameters.getResumeToken() != null) throw new UnsupportedOperationException("Continuation via progress tokens is not supported"); if (parameters.getRemoteDataHandler() != null) throw new UnsupportedOperationException("Remote data handlers are not supported"); this.selector = new DocumentSelector(parameters.getDocumentSelection()); this.fieldSet = new FieldSetRepo().parse(access.getDocumentTypeManager(), parameters.fieldSet()); this.data = parameters.getLocalDataHandler() == null ? new VisitorDataQueue() : parameters.getLocalDataHandler(); this.data.reset(); this.data.setSession(this); this.control = parameters.getControlHandler() == null ? 
new VisitorControlHandler() : parameters.getControlHandler(); this.control.reset(); this.control.setSession(this); this.outstanding = new ConcurrentSkipListMap<>(Comparator.comparing(DocumentId::toString)); this.outstanding.putAll(access.documents); this.state = new AtomicReference<>(State.RUNNING); start(); } void start() { new Thread(() -> { try { outstanding.forEach((id, document) -> { if (state.get() != State.RUNNING) return; if (selector.accepts(new DocumentPut(document)) != Result.TRUE) return; Document copy = new Document(document.getDataType(), document.getId()); new FieldSetRepo().copyFields(document, copy, fieldSet); data.onMessage(new PutDocumentMessage(new DocumentPut(copy)), new AckToken(id)); }); state.updateAndGet(current -> { switch (current) { case RUNNING: control.onDone(VisitorControlHandler.CompletionCode.SUCCESS, "Success"); return State.SUCCESS; case ABORTED: control.onDone(VisitorControlHandler.CompletionCode.ABORTED, "Aborted by user"); return State.ABORTED; default: control.onDone(VisitorControlHandler.CompletionCode.FAILURE, "Unexpected state '" + current + "'");; return State.FAILURE; } }); } catch (Exception e) { state.set(State.FAILURE); outstanding.clear(); control.onDone(VisitorControlHandler.CompletionCode.FAILURE, Exceptions.toMessageString(e)); } finally { data.onDone(); } }).start(); } @Override public boolean isDone() { return outstanding.isEmpty() && control.isDone(); } @Override public ProgressToken getProgress() { throw new UnsupportedOperationException("Progress tokens are not supported"); } @Override public Trace getTrace() { throw new UnsupportedOperationException("Traces are not supported"); } @Override public boolean waitUntilDone(long timeoutMs) throws InterruptedException { return control.waitUntilDone(timeoutMs); } @Override @Override public void abort() { state.updateAndGet(current -> current == State.RUNNING ? 
State.ABORTED : current); outstanding.clear(); } @Override public VisitorResponse getNext() { return data.getNext(); } @Override public VisitorResponse getNext(int timeoutMilliseconds) throws InterruptedException { return data.getNext(timeoutMilliseconds); } @Override public void destroy() { abort(); } }
Removed that commit.
// Serializes one application instance — its jobs, change blockers, global
// rotation endpoints and deployments — into the given Slime cursor.
private void toSlime(Cursor object, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());

    // Deployment jobs, in deployment-order.
    List<JobStatus> sortedJobs = controller.applications().deploymentTrigger()
                                           .steps(deploymentSpec)
                                           .sortedJobs(instance.deploymentJobs().jobStatus().values());
    Cursor jobArray = object.setArray("deploymentJobs");
    for (JobStatus job : sortedJobs) {
        Cursor jobJson = jobArray.addObject();
        jobJson.setString("type", job.type().jobName());
        jobJson.setBool("success", job.isSuccess());
        job.lastTriggered().ifPresent(run -> toSlime(run, jobJson.setObject("lastTriggered")));
        job.lastCompleted().ifPresent(run -> toSlime(run, jobJson.setObject("lastCompleted")));
        job.firstFailing().ifPresent(run -> toSlime(run, jobJson.setObject("firstFailing")));
        job.lastSuccess().ifPresent(run -> toSlime(run, jobJson.setObject("lastSuccess")));
    }

    // Change blockers declared in the deployment spec.
    Cursor blockerArray = object.setArray("changeBlockers");
    deploymentSpec.changeBlocker().forEach(blocker -> {
        Cursor blockerJson = blockerArray.addObject();
        blockerJson.setBool("versions", blocker.blocksVersions());
        blockerJson.setBool("revisions", blocker.blocksRevisions());
        blockerJson.setString("timeZone", blocker.window().zone().getId());
        Cursor dayArray = blockerJson.setArray("days");
        blocker.window().days().stream().map(DayOfWeek::getValue).forEach(dayArray::addLong);
        Cursor hourArray = blockerJson.setArray("hours");
        blocker.window().hours().forEach(hourArray::addLong);
    });

    // Global endpoints: first those from the instance itself, then those from
    // routing policies.
    Cursor rotationArray = object.setArray("globalRotations");
    instance.endpointsIn(controller.system())
            .scope(Endpoint.Scope.global)
            .legacy(false)
            .asList().stream()
            .map(Endpoint::url)
            .map(URI::toString)
            .forEach(rotationArray::addString);
    Set<RoutingPolicy> routingPolicies = controller.applications().routingPolicies().get(instance.id());
    for (RoutingPolicy policy : routingPolicies)
        policy.rotationEndpointsIn(controller.system()).asList().stream()
              .map(Endpoint::url)
              .map(URI::toString)
              .forEach(rotationArray::addString);

    // Deployments, in deployment-order; recursion is controlled by the request.
    List<Deployment> sortedDeployments = controller.applications().deploymentTrigger()
                                                   .steps(deploymentSpec)
                                                   .sortedDeployments(instance.deployments().values());
    Cursor deploymentArray = object.setArray("deployments");
    for (Deployment deployment : sortedDeployments) {
        Cursor deploymentJson = deploymentArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentJson);

        if (recurseOverDeployments(request)) {
            toSlime(deploymentJson, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        }
        else {
            deploymentJson.setString("environment", deployment.zone().environment().value());
            deploymentJson.setString("region", deployment.zone().region().value());
            deploymentJson.setString("url", withPath(request.getUri().getPath() +
                                                     "/instance/" + instance.name().value() +
                                                     "/environment/" + deployment.zone().environment().value() +
                                                     "/region/" + deployment.zone().region().value(),
                                                     request.getUri()).toString());
        }
    }
}
policy.rotationEndpointsIn(controller.system()).asList().stream()
// Serializes one application instance — its jobs, change blockers, rotation id,
// global rotation endpoints and deployments — into the given Slime cursor.
private void toSlime(Cursor object, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
    object.setString("instance", instance.name().value());

    // Deployment jobs, in deployment-order.
    List<JobStatus> sortedJobs = controller.applications().deploymentTrigger()
                                           .steps(deploymentSpec)
                                           .sortedJobs(instance.deploymentJobs().jobStatus().values());
    Cursor jobArray = object.setArray("deploymentJobs");
    for (JobStatus job : sortedJobs) {
        Cursor jobJson = jobArray.addObject();
        jobJson.setString("type", job.type().jobName());
        jobJson.setBool("success", job.isSuccess());
        job.lastTriggered().ifPresent(run -> toSlime(run, jobJson.setObject("lastTriggered")));
        job.lastCompleted().ifPresent(run -> toSlime(run, jobJson.setObject("lastCompleted")));
        job.firstFailing().ifPresent(run -> toSlime(run, jobJson.setObject("firstFailing")));
        job.lastSuccess().ifPresent(run -> toSlime(run, jobJson.setObject("lastSuccess")));
    }

    // Change blockers declared in the deployment spec.
    Cursor blockerArray = object.setArray("changeBlockers");
    deploymentSpec.changeBlocker().forEach(blocker -> {
        Cursor blockerJson = blockerArray.addObject();
        blockerJson.setBool("versions", blocker.blocksVersions());
        blockerJson.setBool("revisions", blocker.blocksRevisions());
        blockerJson.setString("timeZone", blocker.window().zone().getId());
        Cursor dayArray = blockerJson.setArray("days");
        blocker.window().days().stream().map(DayOfWeek::getValue).forEach(dayArray::addLong);
        Cursor hourArray = blockerJson.setArray("hours");
        blocker.window().hours().forEach(hourArray::addLong);
    });

    // Global endpoints: first those from the instance itself, then those from
    // routing policies.
    Cursor rotationArray = object.setArray("globalRotations");
    instance.endpointsIn(controller.system())
            .scope(Endpoint.Scope.global)
            .legacy(false)
            .asList().stream()
            .map(Endpoint::url)
            .map(URI::toString)
            .forEach(rotationArray::addString);

    // Expose the id of the first assigned rotation, if any.
    instance.rotations().stream()
            .map(AssignedRotation::rotationId)
            .findFirst()
            .ifPresent(rotation -> object.setString("rotationId", rotation.asString()));

    Set<RoutingPolicy> routingPolicies = controller.applications().routingPolicies().get(instance.id());
    for (RoutingPolicy policy : routingPolicies)
        policy.rotationEndpointsIn(controller.system()).asList().stream()
              .map(Endpoint::url)
              .map(URI::toString)
              .forEach(rotationArray::addString);

    // Deployments, in deployment-order; recursion is controlled by the request.
    List<Deployment> sortedDeployments = controller.applications().deploymentTrigger()
                                                   .steps(deploymentSpec)
                                                   .sortedDeployments(instance.deployments().values());
    Cursor deploymentArray = object.setArray("deployments");
    for (Deployment deployment : sortedDeployments) {
        Cursor deploymentJson = deploymentArray.addObject();
        if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
            toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentJson);

        if (recurseOverDeployments(request)) {
            toSlime(deploymentJson, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
        }
        else {
            deploymentJson.setString("environment", deployment.zone().environment().value());
            deploymentJson.setString("region", deployment.zone().region().value());
            deploymentJson.setString("url", withPath(request.getUri().getPath() +
                                                     "/instance/" + instance.name().value() +
                                                     "/environment/" + deployment.zone().environment().value() +
                                                     "/region/" + deployment.zone().region().value(),
                                                     request.getUri()).toString());
        }
    }
}
class ApplicationApiHandler extends LoggingRequestHandler { private static final String OPTIONAL_PREFIX = "/api"; private final Controller controller; private final AccessControlRequests accessControlRequests; private final TestConfigSerializer testConfigSerializer; @Inject public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller, AccessControlRequests accessControlRequests) { super(parentCtx); this.controller = controller; this.accessControlRequests = accessControlRequests; this.testConfigSerializer = new TestConfigSerializer(controller.system()); } @Override public Duration getTimeout() { return Duration.ofMinutes(20); } @Override public HttpResponse handle(HttpRequest request) { try { Path path = new Path(request.getUri(), OPTIONAL_PREFIX); switch (request.getMethod()) { case GET: return handleGET(path, request); case PUT: return handlePUT(path, request); case POST: return handlePOST(path, request); case PATCH: return handlePATCH(path, request); case DELETE: return handleDELETE(path, request); case OPTIONS: return handleOPTIONS(); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); } } catch (ForbiddenException e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); } catch (NotAuthorizedException e) { return ErrorResponse.unauthorized(Exceptions.toMessageString(e)); } catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (ConfigServerException e) { switch (e.getErrorCode()) { case NOT_FOUND: return new ErrorResponse(NOT_FOUND, e.getErrorCode().name(), Exceptions.toMessageString(e)); case ACTIVATION_CONFLICT: return new ErrorResponse(CONFLICT, e.getErrorCode().name(), Exceptions.toMessageString(e)); case INTERNAL_SERVER_ERROR: return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getErrorCode().name(), 
Exceptions.toMessageString(e)); default: return new ErrorResponse(BAD_REQUEST, e.getErrorCode().name(), Exceptions.toMessageString(e)); } } catch (RuntimeException e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e); return ErrorResponse.internalServerError(Exceptions.toMessageString(e)); } } private HttpResponse handleGET(Path path, HttpRequest request) { if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/user")) return authenticatedUser(request); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/cost")) return tenantCost(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/cost/{month}")) return tenantCost(path.get("tenant"), path.get("month"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return 
instance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return 
deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if 
// --- tail of handleGET (the method and its 'if' keyword open on the previous source line) ---
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes PUT requests to their handlers; unknown paths yield 404. */
    private HttpResponse handlePUT(Path path, HttpRequest request) {
        if (path.matches("/application/v4/user")) return createUser(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        // Both path orders (".../instance/{instance}/environment/..." and ".../environment/.../instance/{instance}")
        // are accepted; PUT passes 'false' for the override flag (contrast with handleDELETE, which passes 'true').
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes POST requests (create / deploy / trigger operations); unknown paths yield 404. */
    private HttpResponse handlePOST(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
        // The boolean argument to deployPlatform distinguishes plain platform deploys (false) from pinned ones (true).
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/jobreport")) return notifyJobCompletion(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), request);
        // The {ignored} instance segment is intentionally unused — job reports are handled per application.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{ignored}/jobreport")) return notifyJobCompletion(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // Same three operations, with the environment/region and instance path segments in the other order.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes PATCH requests; both forms patch the application — the instance segment is ignored. */
    private HttpResponse handlePATCH(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes DELETE requests (method continues on the following source line). */
    private HttpResponse handleDELETE(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        // "all" cancels every pending change; the {choice} form cancels a specific one.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("choice"));
        // (condition of this 'if' continues on the following source line)
        if
// --- continuation of handleDELETE (the 'if' keyword ends the previous source line) ---
(path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return JobControllerApiHandlerHelper.unregisterResponse(controller.jobController(), path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return JobControllerApiHandlerHelper.unregisterResponse(controller.jobController(), path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // DELETE passes 'true' for the override flag (contrast with handlePUT's 'false').
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Responds to OPTIONS with the allowed methods and an empty body. */
    private HttpResponse handleOPTIONS() {
        EmptyResponse response = new EmptyResponse();
        response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
        return response;
    }

    /** Renders all tenants, each serialized in full. */
    private HttpResponse recursiveRoot(HttpRequest request) {
        Slime slime = new Slime();
        Cursor tenantArray = slime.setArray();
        for (Tenant tenant : controller.tenants().asList())
            toSlime(tenantArray.addObject(), tenant, request);
        return new SlimeJsonResponse(slime);
    }

    /** Root resource: recursive tenant listing when requested, otherwise just resource links. */
    private HttpResponse root(HttpRequest request) {
        return recurseOverTenants(request)
                ? recursiveRoot(request)
                : new ResourceResponse(request, "user", "tenant");
    }

    /** Returns the authenticated user and the tenants that user can access. */
    private HttpResponse authenticatedUser(HttpRequest request) {
        Principal user = requireUserPrincipal(request);
        if (user == null) throw new NotAuthorizedException("You must be authenticated.");
        String userName = user instanceof AthenzPrincipal ? ((AthenzPrincipal) user).getIdentity().getName() : user.getName();
        TenantName tenantName = TenantName.from(UserTenant.normalizeUser(userName));
        List<Tenant> tenants = controller.tenants().asList(new Credentials(user));
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        response.setString("user", userName);
        Cursor tenantsArray = response.setArray("tenants");
        for (Tenant tenant : tenants)
            tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
        // "tenantExists": whether the user's own (normalized) user tenant is among the accessible tenants.
        response.setBool("tenantExists", tenants.stream().anyMatch(tenant -> tenant.name().equals(tenantName)));
        return new SlimeJsonResponse(slime);
    }

    /** Lists all tenants in short form. */
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList())
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    /** Renders the named tenant, or 404 if it does not exist. */
    private HttpResponse tenant(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName))
                         .map(tenant -> tenant(tenant, request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    private HttpResponse tenant(Tenant tenant, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), tenant, request);
        return new SlimeJsonResponse(slime);
    }

    /** Cost overview for a tenant, or 404 if the tenant does not exist. */
    private HttpResponse tenantCost(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName))
                         .map(tenant -> tenantCost(tenant, request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    private HttpResponse tenantCost(Tenant tenant, HttpRequest request) {
        var slime = new Slime();
        var objectCursor = slime.setObject();
        // NOTE(review): monthsCursor is unused beyond the side effect of creating the (currently empty)
        // "months" array in the response — presumably a placeholder for cost data to come.
        var monthsCursor = objectCursor.setArray("months");
        return new SlimeJsonResponse(slime);
    }

    /** Cost for a tenant for a given "yyyy-MM" month; 404 if the tenant does not exist. */
    private HttpResponse tenantCost(String tenantName, String dateString, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName))
                         .map(tenant -> tenantCost(tenant, tenantCostParseDate(dateString), request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    /**
     * Parses a "yyyy-MM" month string into the first day of that month.
     *
     * @throws IllegalArgumentException if the string does not match the expected format
     */
    private LocalDate tenantCostParseDate(String dateString) {
        // NOTE(review): this pattern is recompiled on every call; it could be hoisted to a static final field.
        var monthPattern = Pattern.compile("^(?<year>[0-9]{4})-(?<month>[0-9]{2})$");
        var matcher = monthPattern.matcher(dateString);
        if (matcher.matches()) {
            var year = Integer.parseInt(matcher.group("year"));
            var month = Integer.parseInt(matcher.group("month"));
            return LocalDate.of(year, month, 1);
        } else {
            throw new IllegalArgumentException("Could not parse year-month '" + dateString + "'");
        }
    }

    private HttpResponse tenantCost(Tenant tenant, LocalDate month, HttpRequest request) {
        // Placeholder: currently returns an empty JSON object regardless of tenant and month.
        var slime = new Slime();
        slime.setObject();
        return new SlimeJsonResponse(slime);
    }

    /** Lists the instances of all (or one named) application under a tenant. */
    private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
        TenantName tenant = TenantName.from(tenantName);
        Slime slime = new Slime();
        Cursor array = slime.setArray();
        for (Application application : controller.applications().asList(tenant)) {
            // Empty applicationName filter matches everything.
            if (applicationName.map(application.id().application().value()::equals).orElse(true))
                for (InstanceName instance : application.instances().keySet())
                    toSlime(application.id().instance(instance), array.addObject(), request);
        }
        return new SlimeJsonResponse(slime);
    }

    private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), getApplication(tenantName, applicationName), request);
        return new SlimeJsonResponse(slime);
    }

    private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName),
                getApplication(tenantName, applicationName), request);
        return new SlimeJsonResponse(slime);
    }

    // (method name and body continue on the following source line)
    private HttpResponse
// --- declaration continues from the previous source line ('private HttpResponse') ---
// Adds a developer key (PEM in the request body) to a cloud tenant and returns all its keys.
addDeveloperKey(String tenantName, HttpRequest request) {
        // Only cloud tenants can hold developer keys.
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        Principal user = request.getJDiscRequest().getUserPrincipal();
        String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
        Slime root = new Slime();
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
            tenant = tenant.withDeveloperKey(developerKey, user);
            toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
            controller.tenants().store(tenant);
        });
        return new SlimeJsonResponse(root);
    }

    /** Removes a developer key (PEM in the request body) from a cloud tenant and returns the remaining keys. */
    private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
        // NOTE(review): 'user' is looked up but never used in this visible code — verify whether it can be removed.
        Principal user = ((CloudTenant) controller.tenants().require(TenantName.from(tenantName))).developerKeys().get(developerKey);
        Slime root = new Slime();
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
            tenant = tenant.withoutDeveloperKey(developerKey);
            toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
            controller.tenants().store(tenant);
        });
        return new SlimeJsonResponse(root);
    }

    /** Renders a key → principal map as an array of { key, user } objects. */
    private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
        keys.forEach((key, principal) -> {
            Cursor keyObject = keysArray.addObject();
            keyObject.setString("key", KeyUtils.toPem(key));
            keyObject.setString("user", principal.getName());
        });
    }

    /** Adds a deploy key (PEM in the request body) to an application and returns all its keys. */
    private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
        String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
        Slime root = new Slime();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            application = application.withDeployKey(deployKey);
            application.get().deployKeys().stream()
                       .map(KeyUtils::toPem)
                       .forEach(root.setObject().setArray("keys")::addString);
            controller.applications().store(application);
        });
        return new SlimeJsonResponse(root);
    }

    /** Removes a deploy key from an application and returns the remaining keys. */
    private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
        String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
        Slime root = new Slime();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            application = application.withoutDeployKey(deployKey);
            application.get().deployKeys().stream()
                       .map(KeyUtils::toPem)
                       .forEach(root.setObject().setArray("keys")::addString);
            controller.applications().store(application);
        });
        return new SlimeJsonResponse(root);
    }

    /** Applies a partial update ("majorVersion", "pemDeployKey") to an application; reports what changed. */
    private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
        Inspector requestObject = toSlime(request.getData()).get();
        StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            Inspector majorVersionField = requestObject.field("majorVersion");
            if (majorVersionField.valid()) {
                // A value of 0 clears the pinned major version (stored as null, reported as "empty").
                Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
                application = application.withMajorVersion(majorVersion);
                messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion));
            }
            Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
            if (pemDeployKeyField.valid()) {
                String pemDeployKey = pemDeployKeyField.asString();
                PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
                application = application.withDeployKey(deployKey);
                messageBuilder.add("Added deploy key " + pemDeployKey);
            }
            controller.applications().store(application);
        });
        return new MessageResponse(messageBuilder.toString());
    }

    /** Fetches the application, or throws NotExistsException. */
    private Application getApplication(String tenantName, String applicationName) {
        TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
        return controller.applications().getApplication(applicationId)
                         .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
    }

    /** Fetches the instance, or throws NotExistsException. */
    private Instance getInstance(String tenantName, String applicationName, String instanceName) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        return controller.applications().getInstance(applicationId)
                         .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
    }

    /** Lists the nodes of a deployment, with state, resources and cluster info, from the node repository. */
    private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = ZoneId.from(environment, region);
        List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, id);
        Slime slime = new Slime();
        Cursor nodesArray = slime.setObject().setArray("nodes");
        for (Node node : nodes) {
            Cursor nodeObject = nodesArray.addObject();
            nodeObject.setString("hostname", node.hostname().value());
            nodeObject.setString("state", valueOf(node.state()));
            nodeObject.setString("orchestration", valueOf(node.serviceState()));
            nodeObject.setString("version", node.currentVersion().toString());
            nodeObject.setString("flavor", node.canonicalFlavor());
            nodeObject.setDouble("vcpu", node.vcpu());
            nodeObject.setDouble("memoryGb", node.memoryGb());
            nodeObject.setDouble("diskGb", node.diskGb());
            nodeObject.setDouble("bandwidthGbps", node.bandwidthGbps());
            nodeObject.setBool("fastDisk", node.fastDisk());
            nodeObject.setString("clusterId", node.clusterId());
            nodeObject.setString("clusterType", valueOf(node.clusterType()));
        }
        return new SlimeJsonResponse(slime);
    }

    /** Serializes a node state; throws on unknown values so new enum constants must be handled explicitly. */
    private static String valueOf(Node.State state) {
        switch (state) {
            case failed: return "failed";
            case parked: return "parked";
            case dirty: return "dirty";
            case ready: return "ready";
            case active: return "active";
            case inactive: return "inactive";
            case reserved: return "reserved";
            case provisioned: return "provisioned";
            default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
        }
    }

    /** Serializes a node orchestration state; throws on unknown values. */
    private static String valueOf(Node.ServiceState state) {
        switch (state) {
            case expectedUp: return "expectedUp";
            case allowedDown: return "allowedDown";
            case unorchestrated: return "unorchestrated";
            default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
        }
    }

    /** Serializes a node cluster type; throws on unknown values. */
    private static String valueOf(Node.ClusterType type) {
        switch (type) {
            case admin: return "admin";
            case content: return "content";
            case container: return "container";
            default: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
        }
    }

    /** Streams deployment logs from the config server directly into the HTTP response body. */
    private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
        ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = ZoneId.from(environment, region);
        DeploymentId deployment = new DeploymentId(application, zone);
        InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
        return new HttpResponse(200) {
            @Override
            public void render(OutputStream outputStream) throws IOException {
                logStream.transferTo(outputStream);
            }
        };
    }

    // Force-triggers a job for the given application (parameter list and body continue on the following source line).
    private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest
// --- continuation of trigger(...): the parameter list opens on the previous source line ---
request) {
        String triggered = controller.applications().deploymentTrigger()
                                     .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName())
                                     .stream().map(JobType::jobName).collect(joining(", "));
        return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
                                                       : "Triggered " + triggered + " for " + id);
    }

    /** Pauses the given job for the maximum allowed pause period. */
    private HttpResponse pause(ApplicationId id, JobType type) {
        Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
        controller.applications().deploymentTrigger().pauseJob(id, type, until);
        return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
    }

    /** Renders an application: versions, changes, instances, deploy keys, metrics, activity and issues. */
    private void toSlime(Cursor object, Application application, HttpRequest request) {
        object.setString("tenant", application.id().tenant().value());
        object.setString("application", application.id().application().value());
        object.setString("deployments", withPath("/application/v4" +
                                                 "/tenant/" + application.id().tenant().value() +
                                                 "/application/" + application.id().application().value() +
                                                 "/job/", request.getUri()).toString());
        application.latestVersion().ifPresent(version -> toSlime(version, object.setObject("latestVersion")));
        application.projectId().ifPresent(id -> object.setLong("projectId", id));
        if ( ! application.change().isEmpty())
            toSlime(object.setObject("deploying"), application.change());
        if ( ! application.outstandingChange().isEmpty())
            toSlime(object.setObject("outstandingChange"), application.outstandingChange());
        object.setString("compileVersion", compileVersion(application.id()).toFullString());
        application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
        Cursor instancesArray = object.setArray("instances");
        for (Instance instance : application.instances().values())
            toSlime(instancesArray.addObject(), instance, application.deploymentSpec(), request);
        application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
        Cursor metricsObject = object.setObject("metrics");
        metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
        metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
        Cursor activity = object.setObject("activity");
        application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
        application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
        application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
        application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
    }

    /** Renders an instance: deployment jobs, change blockers, rotations, deployments, metrics and activity. */
    private void toSlime(Cursor object, Instance instance, Application application, HttpRequest request) {
        object.setString("tenant", instance.id().tenant().value());
        object.setString("application", instance.id().application().value());
        object.setString("instance", instance.id().instance().value());
        object.setString("deployments", withPath("/application/v4" +
                                                 "/tenant/" + instance.id().tenant().value() +
                                                 "/application/" + instance.id().application().value() +
                                                 "/instance/" + instance.id().instance().value() + "/job/",
                                                 request.getUri()).toString());
        application.latestVersion().ifPresent(version -> sourceRevisionToSlime(version.source(), object.setObject("source")));
        application.projectId().ifPresent(id -> object.setLong("projectId", id));
        if ( ! application.change().isEmpty()) {
            toSlime(object.setObject("deploying"), application.change());
        }
        if ( ! application.outstandingChange().isEmpty()) {
            toSlime(object.setObject("outstandingChange"), application.outstandingChange());
        }
        List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
                                              .steps(application.deploymentSpec())
                                              .sortedJobs(instance.deploymentJobs().jobStatus().values());
        object.setBool("deployedInternally", application.internal());
        Cursor deploymentsArray = object.setArray("deploymentJobs");
        for (JobStatus job : jobStatus) {
            Cursor jobObject = deploymentsArray.addObject();
            jobObject.setString("type", job.type().jobName());
            jobObject.setBool("success", job.isSuccess());
            job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
            job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
            job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
            job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
        }
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        });
        object.setString("compileVersion", compileVersion(application.id()).toFullString());
        application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
        // Global rotation endpoints come both from the instance's own endpoints and from routing policies.
        Cursor globalRotationsArray = object.setArray("globalRotations");
        instance.endpointsIn(controller.system())
                .scope(Endpoint.Scope.global)
                .legacy(false)
                .asList().stream()
                .map(Endpoint::url)
                .map(URI::toString)
                .forEach(globalRotationsArray::addString);
        Set<RoutingPolicy> routingPolicies = controller.applications().routingPolicies().get(instance.id());
        for (RoutingPolicy policy : routingPolicies) {
            policy.rotationEndpointsIn(controller.system()).asList().stream()
                  .map(Endpoint::url)
                  .map(URI::toString)
                  .forEach(globalRotationsArray::addString);
        }
        List<Deployment> deployments = controller.applications().deploymentTrigger()
                                                 .steps(application.deploymentSpec())
                                                 .sortedDeployments(instance.deployments().values());
        Cursor instancesArray = object.setArray("instances");
        for (Deployment deployment : deployments) {
            Cursor deploymentObject = instancesArray.addObject();
            if (deployment.zone().environment() == Environment.prod) {
                // Writes both the legacy single-rotation status (exactly one rotation)
                // and the newer per-endpoint status (any rotations present).
                if (instance.rotations().size() == 1) {
                    toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment), deploymentObject);
                }
                if (!instance.rotations().isEmpty()) {
                    toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
                }
            }
            if (recurseOverDeployments(request))
                toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
            else {
                // Shallow form: identify the deployment and link to its full resource.
                deploymentObject.setString("environment", deployment.zone().environment().value());
                deploymentObject.setString("region", deployment.zone().region().value());
                deploymentObject.setString("instance", instance.id().instance().value());
                deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                           "/environment/" + deployment.zone().environment().value() +
                                                           "/region/" + deployment.zone().region().value(),
                                                           request.getUri()).toString());
            }
        }
        // Singular "pemDeployKey" (first key) is kept alongside the plural list, presumably for backwards
        // compatibility — TODO confirm against API consumers.
        application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
        application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
        Cursor metricsObject = object.setObject("metrics");
        metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
        metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
        Cursor activity = object.setObject("activity");
        application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
        application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
        application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
        application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
    }

    /** Renders a single deployment, or throws NotExistsException if not deployed in the given zone. */
    private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        Instance instance = controller.applications().getInstance(id)
                                      .orElseThrow(() -> new NotExistsException(id + " not found"));
        DeploymentId deploymentId = new DeploymentId(instance.id(), ZoneId.from(environment, region));
        Deployment deployment = instance.deployments().get(deploymentId.zoneId());
        if (deployment == null)
            throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId());
        Slime slime = new Slime();
        toSlime(slime.setObject(), deploymentId, deployment, request);
        return new SlimeJsonResponse(slime);
    }

    /** Renders a change: the platform version and/or the (known) application revision. */
    private void toSlime(Cursor object, Change change) {
        change.platform().ifPresent(version -> object.setString("version", version.toString()));
        change.application()
              .filter(version -> !version.isUnknown())
              .ifPresent(version -> toSlime(version, object.setObject("revision")));
    }

    /** Renders full deployment details: endpoints, service URLs, versions, expiry, activity, cost and metrics. */
    private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
        response.setString("tenant", deploymentId.applicationId().tenant().value());
        response.setString("application", deploymentId.applicationId().application().value());
        response.setString("instance", deploymentId.applicationId().instance().value());
        response.setString("environment", deploymentId.zoneId().environment().value());
        response.setString("region", deploymentId.zoneId().region().value());
        var endpointArray = response.setArray("endpoints");
        for (var policy : controller.applications().routingPolicies().get(deploymentId)) {
            Cursor endpointObject = endpointArray.addObject();
            Endpoint endpoint = policy.endpointIn(controller.system());
            endpointObject.setString("cluster", policy.cluster().value());
            endpointObject.setBool("tls", endpoint.tls());
            endpointObject.setString("url", endpoint.url().toString());
        }
        Cursor serviceUrlArray = response.setArray("serviceUrls");
        controller.applications().getDeploymentEndpoints(deploymentId)
                  .forEach(endpoint -> serviceUrlArray.addString(endpoint.toString()));
        response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" +
                                             deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" +
                                             deploymentId.applicationId().tenant() + "." +
                                             deploymentId.applicationId().application() + "." +
                                             deploymentId.applicationId().instance(), request.getUri()).toString());
        response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
        response.setString("version", deployment.version().toFullString());
        response.setString("revision", deployment.applicationVersion().id());
        response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
        controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
                  .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
        controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId())).projectId()
                  .ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
        sourceRevisionToSlime(deployment.applicationVersion().source(), response);
        Cursor activity = response.setObject("activity");
        deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
        // NOTE(review): cost is currently always rendered from an empty map — looks like a stub; confirm.
        DeploymentCost appCost = new DeploymentCost(Map.of());
        Cursor costObject = response.setObject("cost");
        toSlime(appCost, costObject);
        DeploymentMetrics metrics = deployment.metrics();
        Cursor metricsObject = response.setObject("metrics");
        metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
        metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
        metricsObject.setDouble("documentCount", metrics.documentCount());
        metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
        metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
        metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
    }

    /** Renders an application version: build number, hash and source revision (skipped entirely when unknown). */
    private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
        if ( ! applicationVersion.isUnknown()) {
            object.setLong("buildNumber", applicationVersion.buildNumber().getAsLong());
            object.setString("hash", applicationVersion.id());
            sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
        }
    }

    /** Renders a source revision, if present; otherwise writes nothing. */
    private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
        if ( ! revision.isPresent()) return;
        object.setString("gitRepository", revision.get().repository());
        object.setString("gitBranch", revision.get().branch());
        object.setString("gitCommit", revision.get().commit());
    }

    /** Renders the legacy single-rotation BCP status. */
    private void toSlime(RotationState state, Cursor object) {
        Cursor bcpStatus = object.setObject("bcpStatus");
        bcpStatus.setString("rotationStatus", rotationStateString(state));
    }

    /** Renders per-endpoint rotation status for a deployment. */
    private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
        var array = object.setArray("endpointStatus");
        for (var rotation : rotations) {
            var statusObject = array.addObject();
            var targets = status.of(rotation.rotationId());
            statusObject.setString("endpointId", rotation.endpointId().id());
            statusObject.setString("rotationId", rotation.rotationId().asString());
            statusObject.setString("clusterId", rotation.clusterId().value());
            statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
            statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
        }
    }

    private URI monitoringSystemUri(DeploymentId deploymentId) {
        return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
    }

    /**
     * Returns a non-broken, released version at least as old as the oldest platform the given application is on.
     *
     * If no known version is applicable, the newest version at least as old as the oldest platform is selected,
     * among all versions released for this system.
If no such versions exists, throws an IllegalStateException. */ private Version compileVersion(TenantAndApplicationId id) { Version oldestPlatform = controller.applications().oldestInstalledPlatform(id); return controller.versionStatus().versions().stream() .filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low)) .filter(VespaVersion::isReleased) .map(VespaVersion::versionNumber) .filter(version -> ! version.isAfter(oldestPlatform)) .max(Comparator.naturalOrder()) .orElseGet(() -> controller.mavenRepository().metadata().versions().stream() .filter(version -> ! version.isAfter(oldestPlatform)) .filter(version -> ! controller.versionStatus().versions().stream() .map(VespaVersion::versionNumber) .collect(Collectors.toSet()).contains(version)) .max(Comparator.naturalOrder()) .orElseThrow(() -> new IllegalStateException("No available releases of " + controller.mavenRepository().artifactId()))); } private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) { Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName)); ZoneId zone = ZoneId.from(environment, region); Deployment deployment = instance.deployments().get(zone); if (deployment == null) { throw new NotExistsException(instance + " has no deployment in " + zone); } Inspector requestData = toSlime(request.getData()).get(); String reason = mandatory("reason", requestData).asString(); String agent = requireUserPrincipal(request).getName(); long timestamp = controller.clock().instant().getEpochSecond(); EndpointStatus.Status status = inService ? 
/**
 * Returns the global rotation override status for each routing endpoint of the given deployment.
 * NOTE(review): the response array alternates a plain string element (the upstream name) with an
 * object element (its status) — presumably a legacy wire format consumed by existing clients; verify
 * before changing the shape.
 */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    Map<RoutingEndpoint, EndpointStatus> status = controller.applications().globalRotationStatus(deploymentId);
    for (RoutingEndpoint endpoint : status.keySet()) {
        EndpointStatus currentStatus = status.get(endpoint);
        // Upstream name first, then its status object, as adjacent array entries.
        array.addString(endpoint.upstreamName());
        Cursor statusObject = array.addObject();
        statusObject.setString("status", currentStatus.getStatus().name());
        // Reason and agent may be null in stored status; emit empty strings instead.
        statusObject.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason());
        statusObject.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent());
        statusObject.setLong("timestamp", currentStatus.getEpoch());
    }
    return new SlimeJsonResponse(slime);
}
lastMonth.getDiskGb()); Map<ApplicationId, List<ResourceSnapshot>> history = meteringInfo.getSnapshotHistory(); Cursor details = root.setObject("details"); Cursor detailsCpu = details.setObject("cpu"); Cursor detailsMem = details.setObject("mem"); Cursor detailsDisk = details.setObject("disk"); history.entrySet().stream() .forEach(entry -> { String instanceName = entry.getKey().instance().value(); Cursor detailsCpuApp = detailsCpu.setObject(instanceName); Cursor detailsMemApp = detailsMem.setObject(instanceName); Cursor detailsDiskApp = detailsDisk.setObject(instanceName); Cursor detailsCpuData = detailsCpuApp.setArray("data"); Cursor detailsMemData = detailsMemApp.setArray("data"); Cursor detailsDiskData = detailsDiskApp.setArray("data"); entry.getValue().stream() .forEach(resourceSnapshot -> { Cursor cpu = detailsCpuData.addObject(); cpu.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli()); cpu.setDouble("value", resourceSnapshot.getCpuCores()); Cursor mem = detailsMemData.addObject(); mem.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli()); mem.setDouble("value", resourceSnapshot.getMemoryGb()); Cursor disk = detailsDiskData.addObject(); disk.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli()); disk.setDouble("value", resourceSnapshot.getDiskGb()); }); }); return new SlimeJsonResponse(slime); } private HttpResponse deploying(String tenant, String application, HttpRequest request) { Application app = controller.applications().requireApplication(TenantAndApplicationId.from(tenant, application)); Slime slime = new Slime(); Cursor root = slime.setObject(); if ( ! 
app.change().isEmpty()) { app.change().platform().ifPresent(version -> root.setString("platform", version.toString())); app.change().application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id())); root.setBool("pinned", app.change().isPinned()); } return new SlimeJsonResponse(slime); } private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region)); boolean suspended = controller.applications().isSuspended(deploymentId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setBool("suspended", suspended); return new SlimeJsonResponse(slime); } private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region); ServiceApiResponse response = new ServiceApiResponse(ZoneId.from(environment, region), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)), request.getUri()); response.setResponse(applicationView); return response; } private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) { Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath); ServiceApiResponse response = new ServiceApiResponse(ZoneId.from(environment, region), new 
ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)), request.getUri()); response.setResponse(result, serviceName, restPath); return response; } private HttpResponse createUser(HttpRequest request) { String user = Optional.of(requireUserPrincipal(request)) .filter(AthenzPrincipal.class::isInstance) .map(AthenzPrincipal.class::cast) .map(AthenzPrincipal::getIdentity) .filter(AthenzUser.class::isInstance) .map(AthenzIdentity::getName) .map(UserTenant::normalizeUser) .orElseThrow(() -> new ForbiddenException("Not authenticated or not a user.")); UserTenant tenant = UserTenant.create(user); try { controller.tenants().createUser(tenant); return new MessageResponse("Created user '" + user + "'"); } catch (AlreadyExistsException e) { return new MessageResponse("User '" + user + "' already exists"); } } private HttpResponse updateTenant(String tenantName, HttpRequest request) { getTenantOrThrow(tenantName); TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().update(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); return tenant(controller.tenants().require(TenantName.from(tenantName)), request); } private HttpResponse createTenant(String tenantName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().create(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); return tenant(controller.tenants().require(TenantName.from(tenantName)), request); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = 
toSlime(request.getData()).get(); TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Optional<Credentials> credentials = controller.tenants().require(id.tenant()).type() == Tenant.Type.user ? Optional.empty() : Optional.of(accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest())); Application application = controller.applications().createApplication(id, credentials); Slime slime = new Slime(); toSlime(id, slime.setObject(), request); return new SlimeJsonResponse(slime); } private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName); if (controller.applications().getApplication(applicationId).isEmpty()) createApplication(tenantName, applicationName, request); controller.applications().createInstance(applicationId.instance(instanceName)); Slime slime = new Slime(); toSlime(applicationId.instance(instanceName), slime.setObject(), request); return new SlimeJsonResponse(slime); } /** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */ private HttpResponse deployPlatform(String tenantName, String applicationName, boolean pin, HttpRequest request) { request = controller.auditLogger().log(request); String versionString = readToString(request.getData()); TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(id, application -> { Version version = Version.fromString(versionString); if (version.equals(Version.emptyVersion)) version = controller.systemVersion(); if ( ! systemHasVersion(version)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. 
" + "Active versions: " + controller.versionStatus().versions() .stream() .map(VespaVersion::versionNumber) .map(Version::toString) .collect(joining(", "))); Change change = Change.of(version); if (pin) change = change.withPin(); controller.applications().deploymentTrigger().forceChange(id, change); response.append("Triggered " + change + " for " + id); }); return new MessageResponse(response.toString()); } /** Trigger deployment to the last known application package for the given application. */ private HttpResponse deployApplication(String tenantName, String applicationName, HttpRequest request) { controller.auditLogger().log(request); TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(id, application -> { Change change = Change.of(application.get().latestVersion().get()); controller.applications().deploymentTrigger().forceChange(id, change); response.append("Triggered " + change + " for " + id); }); return new MessageResponse(response.toString()); } /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */ private HttpResponse cancelDeploy(String tenantName, String applicationName, String choice) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(id, application -> { Change change = application.get().change(); if (change.isEmpty()) { response.append("No deployment in progress for " + application + " at this time"); return; } ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase()); controller.applications().deploymentTrigger().cancelChange(id, cancel); response.append("Changed deployment from '" + change + "' to '" + controller.applications().requireApplication(id).change() + "' for " + application); }); return new MessageResponse(response.toString()); } /** 
Schedule restart of deployment, or specific host in a deployment */ private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region)); Optional<Hostname> hostname = Optional.ofNullable(request.getProperty("hostname")).map(Hostname::new); controller.applications().restart(deploymentId, hostname); return new MessageResponse("Requested restart of " + deploymentId); } private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) { Map<String, byte[]> dataParts = parseDataParts(request); if ( ! dataParts.containsKey("applicationZip")) throw new IllegalArgumentException("Missing required form part 'applicationZip'"); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP)); controller.applications().verifyApplicationIdentityConfiguration(id.tenant(), applicationPackage, Optional.of(requireUserPrincipal(request))); Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("vespaVersion", options)) .map(Version::fromString); controller.jobController().deploy(id, type, version, applicationPackage); RunId runId = controller.jobController().last(id, type).get().id(); Slime slime = new Slime(); Cursor rootObject = slime.setObject(); rootObject.setString("message", "Deployment started in " + runId); rootObject.setLong("run", runId.number()); return new SlimeJsonResponse(slime); } private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = ZoneId.from(environment, region); Map<String, 
byte[]> dataParts = parseDataParts(request); if ( ! dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); /* * Special handling of the proxy application (the only system application with an application package) * Setting any other deployOptions here is not supported for now (e.g. specifying version), but * this might be handy later to handle emergency downgrades. */ boolean isZoneApplication = SystemApplication.proxy.id().equals(applicationId); if (isZoneApplication) { String versionStr = deployOptions.field("vespaVersion").asString(); boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null"); if (versionPresent) { throw new RuntimeException("Version not supported for system applications"); } if (controller.versionStatus().isUpgrading()) { throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed"); } Optional<VespaVersion> systemVersion = controller.versionStatus().systemVersion(); if (systemVersion.isEmpty()) { throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined"); } ActivateResult result = controller.applications() .deploySystemApplicationPackage(SystemApplication.proxy, zone, systemVersion.get().versionNumber()); return new SlimeJsonResponse(toSlime(result)); } /* * Normal applications from here */ Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip")) .map(ApplicationPackage::new); Optional<Application> application = controller.applications().getApplication(TenantAndApplicationId.from(applicationId)); Inspector sourceRevision = deployOptions.field("sourceRevision"); Inspector buildNumber = deployOptions.field("buildNumber"); if (sourceRevision.valid() != buildNumber.valid()) throw new IllegalArgumentException("Source revision 
and build number must both be provided, or not"); Optional<ApplicationVersion> applicationVersion = Optional.empty(); if (sourceRevision.valid()) { if (applicationPackage.isPresent()) throw new IllegalArgumentException("Application version and application package can't both be provided."); applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision), buildNumber.asLong())); applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId, application.get().internal(), applicationVersion.get())); } boolean deployDirectly = deployOptions.field("deployDirectly").asBool(); Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new); if (deployDirectly && applicationPackage.isEmpty() && applicationVersion.isEmpty() && vespaVersion.isEmpty()) { Optional<Deployment> deployment = controller.applications().getInstance(applicationId) .map(Instance::deployments) .flatMap(deployments -> Optional.ofNullable(deployments.get(zone))); if(deployment.isEmpty()) throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist"); ApplicationVersion version = deployment.get().applicationVersion(); if(version.isUnknown()) throw new IllegalArgumentException("Can't redeploy application, application version is unknown"); applicationVersion = Optional.of(version); vespaVersion = Optional.of(deployment.get().version()); applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId, application.get().internal(), applicationVersion.get())); } DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly, vespaVersion, deployOptions.field("ignoreValidationErrors").asBool(), deployOptions.field("deployCurrentVersion").asBool()); applicationPackage.ifPresent(aPackage -> controller.applications().verifyApplicationIdentityConfiguration(applicationId.tenant(), aPackage, Optional.of(requireUserPrincipal(request)))); ActivateResult result = 
controller.applications().deploy(applicationId, zone, applicationPackage, applicationVersion, deployOptionsJsonClass); return new SlimeJsonResponse(toSlime(result)); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { Optional<Tenant> tenant = controller.tenants().get(tenantName); if ( ! tenant.isPresent()) return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found"); if (tenant.get().type() == Tenant.Type.user) controller.tenants().deleteUser((UserTenant) tenant.get()); else controller.tenants().delete(tenant.get().name(), accessControlRequests.credentials(tenant.get().name(), toSlime(request.getData()).get(), request.getJDiscRequest())); return tenant(tenant.get(), request); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Optional<Credentials> credentials = controller.tenants().require(id.tenant()).type() == Tenant.Type.user ? Optional.empty() : Optional.of(accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest())); controller.applications().deleteApplication(id, credentials); return new MessageResponse("Deleted application " + id); } private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Optional<Credentials> credentials = controller.tenants().require(id.tenant()).type() == Tenant.Type.user ? 
Optional.empty() : Optional.of(accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest())); controller.applications().deleteInstance(id.instance(instanceName)); if (controller.applications().requireApplication(id).instances().isEmpty()) controller.applications().deleteApplication(id, credentials); return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString()); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName)); DeploymentId deploymentId = new DeploymentId(instance.id(), ZoneId.from(environment, region)); controller.applications().deactivate(deploymentId.applicationId(), deploymentId.zoneId()); return new MessageResponse("Deactivated " + deploymentId); } private HttpResponse notifyJobCompletion(String tenant, String application, HttpRequest request) { try { DeploymentJobs.JobReport report = toJobReport(tenant, application, toSlime(request.getData()).get()); if ( report.jobType() == JobType.component && controller.applications().requireApplication(TenantAndApplicationId.from(report.applicationId())).internal()) throw new IllegalArgumentException(report.applicationId() + " is set up to be deployed from internally, and no " + "longer accepts submissions from Screwdriver v3 jobs. 
If you need to revert " + "to the old pipeline, please file a ticket at yo/vespa-support and request this."); controller.applications().deploymentTrigger().notifyOfCompletion(report); return new MessageResponse("ok"); } catch (IllegalStateException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } } private HttpResponse testConfig(ApplicationId id, JobType type) { Set<ZoneId> zones = controller.jobController().testedZoneAndProductionZones(id, type); return new SlimeJsonResponse(testConfigSerializer.configSlime(id, type, false, controller.applications().clusterEndpoints(id, zones), controller.applications().contentClustersByZone(id, zones))); } private static DeploymentJobs.JobReport toJobReport(String tenantName, String applicationName, Inspector report) { Optional<DeploymentJobs.JobError> jobError = Optional.empty(); if (report.field("jobError").valid()) { jobError = Optional.of(DeploymentJobs.JobError.valueOf(report.field("jobError").asString())); } ApplicationId id = ApplicationId.from(tenantName, applicationName, report.field("instance").asString()); JobType type = JobType.fromJobName(report.field("jobName").asString()); long buildNumber = report.field("buildNumber").asLong(); if (type == JobType.component) return DeploymentJobs.JobReport.ofComponent(id, report.field("projectId").asLong(), buildNumber, jobError, toSourceRevision(report.field("sourceRevision"))); else return DeploymentJobs.JobReport.ofJob(id, type, buildNumber, jobError); } private static SourceRevision toSourceRevision(Inspector object) { if (!object.field("repository").valid() || !object.field("branch").valid() || !object.field("commit").valid()) { throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\"."); } return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(), object.field("commit").asString()); } private Tenant getTenantOrThrow(String tenantName) { return 
controller.tenants().get(tenantName)
          .orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}

/** Serializes the given tenant, including all its applications (or their instances), to the given cursor. */
private void toSlime(Cursor object, Tenant tenant, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tenantType(tenant));
    List<Application> applications = controller.applications().asList(tenant.name());
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case user:
            break; // user tenants carry no extra metadata
        case cloud: {
            CloudTenant cloudTenant = (CloudTenant) tenant;
            Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
            cloudTenant.developerKeys().forEach((key, user) -> {
                Cursor keyObject = pemDeveloperKeysArray.addObject();
                keyObject.setString("key", KeyUtils.toPem(key));
                keyObject.setString("user", user.getName());
            });
            break;
        }
        default:
            throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    Cursor applicationArray = object.setArray("applications");
    for (Application application : applications)
        for (Instance instance : application.instances().values())
            if (recurseOverApplications(request))
                toSlime(applicationArray.addObject(), instance, application, request);
            else
                toSlime(instance.id(), applicationArray.addObject(), request);
}

/** Serializes a compact tenant entry, as used in tenant list responses. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
            break;
        case user:
            break;
        case cloud:
            break;
        default:
            throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}

/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
    try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null);
    }
    catch (URISyntaxException e) {
        throw new RuntimeException("Will not happen", e);
    }
}

/** Parses the given value as a long, returning the given default when it is null. */
private long asLong(String valueOrNull, long defaultWhenNull) {
    if (valueOrNull == null) return defaultWhenNull;
    try {
        return Long.parseLong(valueOrNull);
    }
    catch (NumberFormatException e) {
        throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
    }
}

/** Serializes a single job run to the given cursor. */
private void toSlime(JobStatus.JobRun jobRun, Cursor object) {
    object.setLong("id", jobRun.id());
    object.setString("version", jobRun.platform().toFullString());
    if (!jobRun.application().isUnknown())
        toSlime(jobRun.application(), object.setObject("revision"));
    object.setString("reason", jobRun.reason());
    object.setLong("at", jobRun.at().toEpochMilli());
}

/**
 * Reads the given stream as JSON and returns it as a Slime tree.
 *
 * @throws RuntimeException (wrapping the IOException) if reading fails
 */
private Slime toSlime(InputStream jsonStream) {
    try {
        byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); // cap request bodies at ~1 MB
        return SlimeUtils.jsonToSlime(jsonBytes);
    }
    catch (IOException e) {
        throw new RuntimeException(e); // fix: previously the cause was discarded (bare RuntimeException)
    }
}

/** Returns the user principal of the request, failing with an internal server error if there is none. */
private static Principal requireUserPrincipal(HttpRequest request) {
    Principal principal = request.getJDiscRequest().getUserPrincipal();
    if (principal == null) throw new InternalServerErrorException("Expected a user principal");
    return principal;
}

/** Returns the given field, failing if it is not present in the given object. */
private Inspector mandatory(String key, Inspector object) {
    if ( ! object.field(key).valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return object.field(key);
}

/** Returns the given field as an optional string. */
private Optional<String> optional(String key, Inspector object) {
    return SlimeUtils.optionalString(object.field(key));
}

/** Joins the given elements with '/' to form a path. */
private static String path(Object... elements) {
    return Joiner.on("/").join(elements);
}

/** Serializes a tenant-and-application id, with its API url, to the given cursor. */
private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("url", withPath("/application/v4" +
                                     "/tenant/" + id.tenant().value() +
                                     "/application/" + id.application().value(),
                                     request.getUri()).toString());
}

/** Serializes an application instance id, with its API url, to the given cursor. */
private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
    object.setString("tenant", id.tenant().value());
    object.setString("application", id.application().value());
    object.setString("instance", id.instance().value());
    object.setString("url", withPath("/application/v4" +
                                     "/tenant/" + id.tenant().value() +
                                     "/application/" + id.application().value() +
                                     "/instance/" + id.instance().value(),
                                     request.getUri()).toString());
}

/** Serializes the result of a deployment activation: prepare log and config change actions. */
private Slime toSlime(ActivateResult result) {
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    object.setString("revisionId", result.revisionId().id());
    object.setLong("applicationZipSize", result.applicationZipSizeBytes());
    Cursor logArray = object.setArray("prepareMessages");
    if (result.prepareResponse().log != null) {
        for (Log logMessage : result.prepareResponse().log) {
            Cursor logObject = logArray.addObject();
            logObject.setLong("time", logMessage.time);
            logObject.setString("level", logMessage.level);
            logObject.setString("message", logMessage.message);
        }
    }
    Cursor changeObject = object.setObject("configChangeActions");
    Cursor restartActionsArray = changeObject.setArray("restart");
    for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
        Cursor restartActionObject = restartActionsArray.addObject();
        restartActionObject.setString("clusterName", restartAction.clusterName);
        restartActionObject.setString("clusterType", restartAction.clusterType);
        restartActionObject.setString("serviceType", restartAction.serviceType);
        serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
        stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
    }
    Cursor refeedActionsArray = changeObject.setArray("refeed");
    for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
        Cursor refeedActionObject = refeedActionsArray.addObject();
        refeedActionObject.setString("name", refeedAction.name);
        refeedActionObject.setBool("allowed", refeedAction.allowed);
        refeedActionObject.setString("documentType", refeedAction.documentType);
        refeedActionObject.setString("clusterName", refeedAction.clusterName);
        serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
        stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
    }
    return slime;
}

/** Serializes each service info to an object in the given array. */
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
    for (ServiceInfo serviceInfo : serviceInfoList) {
        Cursor serviceInfoObject = array.addObject();
        serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
        serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
        serviceInfoObject.setString("configId", serviceInfo.configId);
        serviceInfoObject.setString("hostName", serviceInfo.hostName);
    }
}

/** Adds each string to the given array. */
private void stringsToSlime(List<String> strings, Cursor array) {
    for (String string : strings)
        array.addString(string);
}

/** Reads the entire stream as a single string, or null if the stream is empty. */
private String readToString(InputStream stream) {
    // "\\A" makes the scanner consume the whole stream as one token.
    // NOTE(review): uses the platform default charset and never closes the scanner — confirm callers rely on this.
    Scanner scanner = new Scanner(stream).useDelimiter("\\A");
    if ( ! scanner.hasNext()) return null;
    return scanner.next();
}

/** Returns whether this system knows about the given Vespa version. */
private boolean systemHasVersion(Version version) {
    return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version));
}

/** Serializes the cost of a deployment, with a per-cluster breakdown, to the given cursor. */
public static void toSlime(DeploymentCost deploymentCost, Cursor object) {
    object.setLong("tco", (long)deploymentCost.getTco());
    object.setLong("waste", (long)deploymentCost.getWaste());
    object.setDouble("utilization", deploymentCost.getUtilization());
    Cursor clustersObject = object.setObject("cluster");
    for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet())
        toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey()));
}

/** Serializes the cost and utilization of a single cluster to the given cursor. */
private static void toSlime(ClusterCost clusterCost, Cursor object) {
    object.setLong("count", clusterCost.getClusterInfo().getHostnames().size());
    object.setString("resource", getResourceName(clusterCost.getResultUtilization()));
    object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization());
    // fix: cast to long, not int — an int cast could overflow, and the DeploymentCost serializer uses (long)
    object.setLong("tco", (long)clusterCost.getTco());
    object.setLong("waste", (long)clusterCost.getWaste());
    object.setString("flavor", clusterCost.getClusterInfo().getFlavor());
    object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost());
    object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU());
    object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem());
    object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk());
    object.setString("type", clusterCost.getClusterInfo().getClusterType().name());
    Cursor utilObject = object.setObject("util");
    utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu());
    utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory());
    utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk());
    utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy());
    Cursor usageObject = object.setObject("usage");
    usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu());
    usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory());
    usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk());
    usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy());
    Cursor hostnamesArray = object.setArray("hostnames");
    for (String hostname : clusterCost.getClusterInfo().getHostnames())
        hostnamesArray.addString(hostname);
}

/** Returns the name of the resource with the highest utilization in the given cluster. */
private static String getResourceName(ClusterUtilization utilization) {
    String name = "cpu";
    double max = utilization.getMaxUtilization();
    if (utilization.getMemory() == max) {
        name = "mem";
    } else if (utilization.getDisk() == max) {
        name = "disk";
    } else if (utilization.getDiskBusy() == max) {
        name = "diskbusy";
    }
    return name;
}

/** Returns whether the 'recursive' request property asks for recursion over tenants (or deeper). */
private static boolean recurseOverTenants(HttpRequest request) {
    return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}

/** Returns whether the 'recursive' request property asks for recursion over applications (or deeper). */
private static boolean recurseOverApplications(HttpRequest request) {
    return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}

/** Returns whether the 'recursive' request property asks for recursion over deployments. */
private static boolean recurseOverDeployments(HttpRequest request) {
    return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}

/** Returns the API name of the given tenant's type. */
private static String tenantType(Tenant tenant) {
    switch (tenant.type()) {
        case user: return "USER";
        case athenz: return "ATHENS";
        case cloud: return "CLOUD";
        // NOTE(review): this message uses the class name while the other switches use tenant.type() — inconsistent but harmless
        default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
    }
}

/** Extracts an application instance id from the tenant/application/instance path segments. */
private static ApplicationId appIdFromPath(Path path) {
    return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}

/** Extracts a job type from the jobtype path segment. */
private static JobType jobTypeFromPath(Path path) {
    return JobType.fromJobName(path.get("jobtype"));
}

/** Extracts a run id from the tenant/application/instance/jobtype/number path segments. */
private static RunId runIdFromPath(Path path) {
    long number = Long.parseLong(path.get("number"));
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
}
/**
 * Handles submission of a new application revision: parses the multipart request,
 * validates the application package, and forwards it to the job controller.
 *
 * @throws IllegalArgumentException if the package lacks deployment.xml or the content hash does not match
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
    Map<String, byte[]> dataParts = parseDataParts(request);
    Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
    SourceRevision sourceRevision = toSourceRevision(submitOptions);
    String authorEmail = submitOptions.field("authorEmail").asString();
    // projectId is clamped to at least 1
    long projectId = Math.max(1, submitOptions.field("projectId").asLong());
    ApplicationPackage applicationPackage =
            new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    if (DeploymentSpec.empty.equals(applicationPackage.deploymentSpec()))
        throw new IllegalArgumentException("Missing required file 'deployment.xml'");
    controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                        tenant,
                                                        application,
                                                        sourceRevision,
                                                        authorEmail,
                                                        projectId,
                                                        applicationPackage,
                                                        dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}

/**
 * Parses the multipart body of the given request into its named parts,
 * verifying the content hash when an x-Content-Hash header is present.
 *
 * @throws IllegalArgumentException if the supplied hash does not match the computed SHA-256 of the body
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
    String contentHash = request.getHeader("x-Content-Hash");
    if (contentHash == null)
        return new MultipartParser().parse(request); // no hash supplied — parse without verification
    // Wrap the body stream in a digester so the hash is computed while parsing.
    DigestInputStream digester = Signatures.sha256Digester(request.getData());
    var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
    if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
        throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
    return dataParts;
}

/**
 * Resolves the rotation to use for the given instance.
 * With an endpointId, the matching rotation is returned; without one, the single
 * rotation is returned, or an error is thrown if the instance has several.
 */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
    if (instance.rotations().isEmpty()) {
        throw new NotExistsException("global rotation does not exist for " + instance);
    }
    if (endpointId.isPresent()) {
        return instance.rotations().stream()
                       .filter(r -> r.endpointId().id().equals(endpointId.get()))
                       .map(AssignedRotation::rotationId)
                       .findFirst()
                       .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
                                                                 " does not exist for " + instance));
    } else if (instance.rotations().size() > 1) {
        throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
    }
    return instance.rotations().get(0).rotationId();
}

/** Returns the API string for a rotation state. */
private static String rotationStateString(RotationState state) {
    switch (state) {
        case in: return "IN";
        case out: return "OUT";
    }
    return "UNKNOWN"; // e.g. when status has not yet been fetched
}

}
/**
 * REST handler for the application/v4 API: tenant, application, instance,
 * deployment and job resources. Routes requests by HTTP method and path,
 * and maps domain exceptions to HTTP error responses.
 */
class ApplicationApiHandler extends LoggingRequestHandler {

    private static final String OPTIONAL_PREFIX = "/api";

    private final Controller controller;
    private final AccessControlRequests accessControlRequests;
    private final TestConfigSerializer testConfigSerializer;

    @Inject
    public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
                                 Controller controller,
                                 AccessControlRequests accessControlRequests) {
        super(parentCtx);
        this.controller = controller;
        this.accessControlRequests = accessControlRequests;
        this.testConfigSerializer = new TestConfigSerializer(controller.system());
    }

    @Override
    public Duration getTimeout() {
        return Duration.ofMinutes(20); // NOTE(review): long timeout, presumably for slow deploy/submit requests — confirm
    }

    @Override
    public HttpResponse handle(HttpRequest request) {
        try {
            Path path = new Path(request.getUri(), OPTIONAL_PREFIX);
            switch (request.getMethod()) {
                case GET: return handleGET(path, request);
                case PUT: return handlePUT(path, request);
                case POST: return handlePOST(path, request);
                case PATCH: return handlePATCH(path, request);
                case DELETE: return handleDELETE(path, request);
                case OPTIONS: return handleOPTIONS();
                default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            }
        }
        catch (ForbiddenException e) {
            return ErrorResponse.forbidden(Exceptions.toMessageString(e));
        }
        catch (NotAuthorizedException e) {
            return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
        }
        catch (NotExistsException e) {
            return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
        }
        catch (IllegalArgumentException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        }
        catch (ConfigServerException e) {
            // Map config server error codes onto equivalent HTTP statuses.
            switch (e.getErrorCode()) {
                case NOT_FOUND:
                    return new ErrorResponse(NOT_FOUND, e.getErrorCode().name(), Exceptions.toMessageString(e));
                case ACTIVATION_CONFLICT:
                    return new ErrorResponse(CONFLICT, e.getErrorCode().name(), Exceptions.toMessageString(e));
                case INTERNAL_SERVER_ERROR:
                    return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getErrorCode().name(), Exceptions.toMessageString(e));
                default:
                    return new ErrorResponse(BAD_REQUEST, e.getErrorCode().name(), Exceptions.toMessageString(e));
            }
        }
        catch (RuntimeException e) {
            log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
            return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
        }
    }

    private HttpResponse handleGET(Path path, HttpRequest request) {
        if (path.matches("/application/v4/")) return root(request);
        if (path.matches("/application/v4/user")) return authenticatedUser(request);
        if (path.matches("/application/v4/tenant")) return tenants(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/cost")) return tenantCost(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/cost/{month}")) return tenantCost(path.get("tenant"), path.get("month"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        // Legacy paths with the instance segment last.
        // (fix: an exact duplicate of the following route was removed here — the second check was unreachable)
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    private HttpResponse handlePUT(Path path, HttpRequest request) {
        if (path.matches("/application/v4/user")) return createUser(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    private HttpResponse handlePOST(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/jobreport")) return notifyJobCompletion(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{ignored}/jobreport")) return notifyJobCompletion(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // Legacy paths with the instance segment last.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    private HttpResponse handlePATCH(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    private HttpResponse handleDELETE(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return JobControllerApiHandlerHelper.unregisterResponse(controller.jobController(), path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return JobControllerApiHandlerHelper.unregisterResponse(controller.jobController(), path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        // Legacy paths with the instance segment last.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    private HttpResponse handleOPTIONS() {
        // We implement this to avoid redirect loops on OPTIONS requests from browsers.
        EmptyResponse response = new EmptyResponse();
        response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
        return response;
    }

    /** Lists all tenants with their full content. */
    private HttpResponse recursiveRoot(HttpRequest request) {
        Slime slime = new Slime();
        Cursor tenantArray = slime.setArray();
        for (Tenant tenant : controller.tenants().asList())
            toSlime(tenantArray.addObject(), tenant, request);
        return new SlimeJsonResponse(slime);
    }

    private HttpResponse root(HttpRequest request) {
        return recurseOverTenants(request)
                ? recursiveRoot(request)
                : new ResourceResponse(request, "user", "tenant");
    }

    /** Returns the authenticated user and the tenants it may access. */
    private HttpResponse authenticatedUser(HttpRequest request) {
        Principal user = requireUserPrincipal(request);
        // NOTE(review): unreachable — requireUserPrincipal already throws when the principal is null
        if (user == null) throw new NotAuthorizedException("You must be authenticated.");
        String userName = user instanceof AthenzPrincipal ? ((AthenzPrincipal) user).getIdentity().getName() : user.getName();
        TenantName tenantName = TenantName.from(UserTenant.normalizeUser(userName));
        List<Tenant> tenants = controller.tenants().asList(new Credentials(user));
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        response.setString("user", userName);
        Cursor tenantsArray = response.setArray("tenants");
        for (Tenant tenant : tenants)
            tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
        response.setBool("tenantExists", tenants.stream().anyMatch(tenant -> tenant.name().equals(tenantName)));
        return new SlimeJsonResponse(slime);
    }

    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList())
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    private HttpResponse tenant(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName))
                         .map(tenant -> tenant(tenant, request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    private HttpResponse tenant(Tenant tenant, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), tenant, request);
        return new SlimeJsonResponse(slime);
    }

    private HttpResponse tenantCost(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName))
                         .map(tenant -> tenantCost(tenant, request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    private HttpResponse tenantCost(Tenant tenant, HttpRequest request) {
        // Stub: returns an empty 'months' array for now.
        var slime = new Slime();
        var objectCursor = slime.setObject();
        var monthsCursor = objectCursor.setArray("months");
        return new SlimeJsonResponse(slime);
    }

    private HttpResponse tenantCost(String tenantName, String dateString, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName))
                         .map(tenant -> tenantCost(tenant, tenantCostParseDate(dateString), request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    /** Parses a 'YYYY-MM' string to the first day of that month. */
    private LocalDate tenantCostParseDate(String dateString) {
        var monthPattern = Pattern.compile("^(?<year>[0-9]{4})-(?<month>[0-9]{2})$");
        var matcher = monthPattern.matcher(dateString);
        if (matcher.matches()) {
            var year = Integer.parseInt(matcher.group("year"));
            var month = Integer.parseInt(matcher.group("month"));
            return LocalDate.of(year, month, 1);
        } else {
            throw new IllegalArgumentException("Could not parse year-month '" + dateString + "'");
        }
    }

    private HttpResponse tenantCost(Tenant tenant, LocalDate month, HttpRequest request) {
        // Stub: returns an empty object for now.
        var slime = new Slime();
        slime.setObject();
        return new SlimeJsonResponse(slime);
    }

    /** Lists the instances of all (or one) of a tenant's applications. */
    private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
        TenantName tenant = TenantName.from(tenantName);
        Slime slime = new Slime();
        Cursor array = slime.setArray();
        for (Application application : controller.applications().asList(tenant)) {
            if (applicationName.map(application.id().application().value()::equals).orElse(true))
                for (InstanceName instance : application.instances().keySet())
                    toSlime(application.id().instance(instance), array.addObject(), request);
        }
        return new SlimeJsonResponse(slime);
    }

    private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), getApplication(tenantName, applicationName), request);
        return new SlimeJsonResponse(slime);
    }

    private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName),
                getApplication(tenantName, applicationName), request);
        return new SlimeJsonResponse(slime);
    }

    /** Registers a PEM developer key for a cloud tenant and returns the updated key list. */
    private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        Principal user = request.getJDiscRequest().getUserPrincipal();
        String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
        Slime root = new Slime();
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
            tenant = tenant.withDeveloperKey(developerKey, user);
            toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
            controller.tenants().store(tenant);
        });
        return new SlimeJsonResponse(root);
    }

    /** Removes a PEM developer key from a cloud tenant and returns the updated key list. */
    private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
        Principal user = ((CloudTenant) controller.tenants().require(TenantName.from(tenantName))).developerKeys().get(developerKey);
        Slime root = new Slime();
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
            tenant = tenant.withoutDeveloperKey(developerKey);
            toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
            controller.tenants().store(tenant);
        });
        return new SlimeJsonResponse(root);
    }

    /** Serializes a key-to-principal map as an array of { key, user } objects. */
    private void toSlime(Cursor keysArray, Map<PublicKey, Principal> keys) {
        keys.forEach((key, principal) -> {
            Cursor keyObject = keysArray.addObject();
            keyObject.setString("key", KeyUtils.toPem(key));
            keyObject.setString("user", principal.getName());
        });
    }

    private HttpResponse addDeployKey(String tenantName, String applicationName,
HttpRequest request) { String pemDeployKey = toSlime(request.getData()).get().field("key").asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); Slime root = new Slime(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { application = application.withDeployKey(deployKey); application.get().deployKeys().stream() .map(KeyUtils::toPem) .forEach(root.setObject().setArray("keys")::addString); controller.applications().store(application); }); return new SlimeJsonResponse(root); } private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) { String pemDeployKey = toSlime(request.getData()).get().field("key").asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); Slime root = new Slime(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { application = application.withoutDeployKey(deployKey); application.get().deployKeys().stream() .map(KeyUtils::toPem) .forEach(root.setObject().setArray("keys")::addString); controller.applications().store(application); }); return new SlimeJsonResponse(root); } private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes."); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { Inspector majorVersionField = requestObject.field("majorVersion"); if (majorVersionField.valid()) { Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong(); application = application.withMajorVersion(majorVersion); messageBuilder.add("Set major version to " + (majorVersion == null ? 
"empty" : majorVersion)); } Inspector pemDeployKeyField = requestObject.field("pemDeployKey"); if (pemDeployKeyField.valid()) { String pemDeployKey = pemDeployKeyField.asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); application = application.withDeployKey(deployKey); messageBuilder.add("Added deploy key " + pemDeployKey); } controller.applications().store(application); }); return new MessageResponse(messageBuilder.toString()); } private Application getApplication(String tenantName, String applicationName) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName); return controller.applications().getApplication(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); } private Instance getInstance(String tenantName, String applicationName, String instanceName) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); return controller.applications().getInstance(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); } private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = ZoneId.from(environment, region); List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, id); Slime slime = new Slime(); Cursor nodesArray = slime.setObject().setArray("nodes"); for (Node node : nodes) { Cursor nodeObject = nodesArray.addObject(); nodeObject.setString("hostname", node.hostname().value()); nodeObject.setString("state", valueOf(node.state())); nodeObject.setString("orchestration", valueOf(node.serviceState())); nodeObject.setString("version", node.currentVersion().toString()); nodeObject.setString("flavor", node.canonicalFlavor()); nodeObject.setDouble("vcpu", node.vcpu()); nodeObject.setDouble("memoryGb", 
node.memoryGb()); nodeObject.setDouble("diskGb", node.diskGb()); nodeObject.setDouble("bandwidthGbps", node.bandwidthGbps()); nodeObject.setBool("fastDisk", node.fastDisk()); nodeObject.setString("clusterId", node.clusterId()); nodeObject.setString("clusterType", valueOf(node.clusterType())); } return new SlimeJsonResponse(slime); } private static String valueOf(Node.State state) { switch (state) { case failed: return "failed"; case parked: return "parked"; case dirty: return "dirty"; case ready: return "ready"; case active: return "active"; case inactive: return "inactive"; case reserved: return "reserved"; case provisioned: return "provisioned"; default: throw new IllegalArgumentException("Unexpected node state '" + state + "'."); } } private static String valueOf(Node.ServiceState state) { switch (state) { case expectedUp: return "expectedUp"; case allowedDown: return "allowedDown"; case unorchestrated: return "unorchestrated"; default: throw new IllegalArgumentException("Unexpected node state '" + state + "'."); } } private static String valueOf(Node.ClusterType type) { switch (type) { case admin: return "admin"; case content: return "content"; case container: return "container"; default: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'."); } } private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) { ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = ZoneId.from(environment, region); DeploymentId deployment = new DeploymentId(application, zone); InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters); return new HttpResponse(200) { @Override public void render(OutputStream outputStream) throws IOException { logStream.transferTo(outputStream); } }; } private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest 
request) { String triggered = controller.applications().deploymentTrigger() .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName()) .stream().map(JobType::jobName).collect(joining(", ")); return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered" : "Triggered " + triggered + " for " + id); } private HttpResponse pause(ApplicationId id, JobType type) { Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause); controller.applications().deploymentTrigger().pauseJob(id, type, until); return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause); } private void toSlime(Cursor object, Application application, HttpRequest request) { object.setString("tenant", application.id().tenant().value()); object.setString("application", application.id().application().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/job/", request.getUri()).toString()); application.latestVersion().ifPresent(version -> toSlime(version, object.setObject("latestVersion"))); application.projectId().ifPresent(id -> object.setLong("projectId", id)); if ( ! application.change().isEmpty()) toSlime(object.setObject("deploying"), application.change()); if ( ! 
application.outstandingChange().isEmpty()) toSlime(object.setObject("outstandingChange"), application.outstandingChange()); object.setString("compileVersion", compileVersion(application.id()).toFullString()); application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); Cursor instancesArray = object.setArray("instances"); for (Instance instance : application.instances().values()) toSlime(instancesArray.addObject(), instance, application.deploymentSpec(), request); application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); } private void toSlime(Cursor object, Instance instance, Application application, HttpRequest request) { object.setString("tenant", instance.id().tenant().value()); object.setString("application", instance.id().application().value()); object.setString("instance", instance.id().instance().value()); 
// toSlime(Cursor, Instance, Application, request) (body): deployments URL, source revision, project id,
// in-progress/outstanding changes, then per-job deployment status (lastTriggered/lastCompleted/
// firstFailing/lastSuccess), change-blocker windows, compile/major version, global rotation endpoints
// (both legacy rotations and routing policies), and per-deployment entries — recursing into full
// deployment serialization when the request asks for it, otherwise emitting environment/region/URL stubs.
// NOTE(review): for prod zones both the single-rotation `bcpStatus` block and the newer
// `endpointStatus` array can be written when exactly one rotation exists — appears to be deliberate
// backward compatibility during a format migration; confirm before consolidating.
// deployment(...) (start; continues on the next physical line): resolves one deployment or throws
// NotExistsException.
object.setString("deployments", withPath("/application/v4" + "/tenant/" + instance.id().tenant().value() + "/application/" + instance.id().application().value() + "/instance/" + instance.id().instance().value() + "/job/", request.getUri()).toString()); application.latestVersion().ifPresent(version -> sourceRevisionToSlime(version.source(), object.setObject("source"))); application.projectId().ifPresent(id -> object.setLong("projectId", id)); if ( ! application.change().isEmpty()) { toSlime(object.setObject("deploying"), application.change()); } if ( ! application.outstandingChange().isEmpty()) { toSlime(object.setObject("outstandingChange"), application.outstandingChange()); } List<JobStatus> jobStatus = controller.applications().deploymentTrigger() .steps(application.deploymentSpec()) .sortedJobs(instance.deploymentJobs().jobStatus().values()); object.setBool("deployedInternally", application.internal()); Cursor deploymentsArray = object.setArray("deploymentJobs"); for (JobStatus job : jobStatus) { Cursor jobObject = deploymentsArray.addObject(); jobObject.setString("type", job.type().jobName()); jobObject.setBool("success", job.isSuccess()); job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered"))); job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted"))); job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing"))); job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess"))); } Cursor changeBlockers = object.setArray("changeBlockers"); application.deploymentSpec().changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = 
changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); }); object.setString("compileVersion", compileVersion(application.id()).toFullString()); application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); Cursor globalRotationsArray = object.setArray("globalRotations"); instance.endpointsIn(controller.system()) .scope(Endpoint.Scope.global) .legacy(false) .asList().stream() .map(Endpoint::url) .map(URI::toString) .forEach(globalRotationsArray::addString); instance.rotations().stream() .map(AssignedRotation::rotationId) .findFirst() .ifPresent(rotation -> object.setString("rotationId", rotation.asString())); Set<RoutingPolicy> routingPolicies = controller.applications().routingPolicies().get(instance.id()); for (RoutingPolicy policy : routingPolicies) { policy.rotationEndpointsIn(controller.system()).asList().stream() .map(Endpoint::url) .map(URI::toString) .forEach(globalRotationsArray::addString); } List<Deployment> deployments = controller.applications().deploymentTrigger() .steps(application.deploymentSpec()) .sortedDeployments(instance.deployments().values()); Cursor instancesArray = object.setArray("instances"); for (Deployment deployment : deployments) { Cursor deploymentObject = instancesArray.addObject(); if (deployment.zone().environment() == Environment.prod) { if (instance.rotations().size() == 1) { toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment), deploymentObject); } if (!instance.rotations().isEmpty()) { toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject); } } if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request); else { deploymentObject.setString("environment", 
deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", instance.id().instance().value()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); } } application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key))); application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); } private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = controller.applications().getInstance(id) 
.orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(instance.id(), ZoneId.from(environment, region)); Deployment deployment = instance.deployments().get(deploymentId.zoneId()); if (deployment == null) throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId()); Slime slime = new Slime(); toSlime(slime.setObject(), deploymentId, deployment, request); return new SlimeJsonResponse(slime); } private void toSlime(Cursor object, Change change) { change.platform().ifPresent(version -> object.setString("version", version.toString())); change.application() .filter(version -> !version.isUnknown()) .ifPresent(version -> toSlime(version, object.setObject("revision"))); } private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) { response.setString("tenant", deploymentId.applicationId().tenant().value()); response.setString("application", deploymentId.applicationId().application().value()); response.setString("instance", deploymentId.applicationId().instance().value()); response.setString("environment", deploymentId.zoneId().environment().value()); response.setString("region", deploymentId.zoneId().region().value()); var endpointArray = response.setArray("endpoints"); for (var policy : controller.applications().routingPolicies().get(deploymentId)) { Cursor endpointObject = endpointArray.addObject(); Endpoint endpoint = policy.endpointIn(controller.system()); endpointObject.setString("cluster", policy.cluster().value()); endpointObject.setBool("tls", endpoint.tls()); endpointObject.setString("url", endpoint.url().toString()); } Cursor serviceUrlArray = response.setArray("serviceUrls"); controller.applications().getDeploymentEndpoints(deploymentId) .forEach(endpoint -> serviceUrlArray.addString(endpoint.toString())); response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + 
"/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString()); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", deployment.applicationVersion().id()); response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli()); controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId()) .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli())); controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId())).projectId() .ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); sourceRevisionToSlime(deployment.applicationVersion().source(), response); Cursor activity = response.setObject("activity"); deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); DeploymentCost appCost = new DeploymentCost(Map.of()); Cursor costObject = response.setObject("cost"); toSlime(appCost, costObject); DeploymentMetrics metrics = deployment.metrics(); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond()); metricsObject.setDouble("documentCount", metrics.documentCount()); metricsObject.setDouble("queryLatencyMillis", 
metrics.queryLatencyMillis()); metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli())); } private void toSlime(ApplicationVersion applicationVersion, Cursor object) { if ( ! applicationVersion.isUnknown()) { object.setLong("buildNumber", applicationVersion.buildNumber().getAsLong()); object.setString("hash", applicationVersion.id()); sourceRevisionToSlime(applicationVersion.source(), object.setObject("source")); } } private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) { if ( ! revision.isPresent()) return; object.setString("gitRepository", revision.get().repository()); object.setString("gitBranch", revision.get().branch()); object.setString("gitCommit", revision.get().commit()); } private void toSlime(RotationState state, Cursor object) { Cursor bcpStatus = object.setObject("bcpStatus"); bcpStatus.setString("rotationStatus", rotationStateString(state)); } private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) { var array = object.setArray("endpointStatus"); for (var rotation : rotations) { var statusObject = array.addObject(); var targets = status.of(rotation.rotationId()); statusObject.setString("endpointId", rotation.endpointId().id()); statusObject.setString("rotationId", rotation.rotationId().asString()); statusObject.setString("clusterId", rotation.clusterId().value()); statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment))); statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli()); } } private URI monitoringSystemUri(DeploymentId deploymentId) { return controller.zoneRegistry().getMonitoringSystemUri(deploymentId); } /** * Returns a non-broken, released version at least as old as the oldest platform the given application is on. 
* * If no known version is applicable, the newest version at least as old as the oldest platform is selected, * among all versions released for this system. If no such versions exists, throws an IllegalStateException. */ private Version compileVersion(TenantAndApplicationId id) { Version oldestPlatform = controller.applications().oldestInstalledPlatform(id); return controller.versionStatus().versions().stream() .filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low)) .filter(VespaVersion::isReleased) .map(VespaVersion::versionNumber) .filter(version -> ! version.isAfter(oldestPlatform)) .max(Comparator.naturalOrder()) .orElseGet(() -> controller.mavenRepository().metadata().versions().stream() .filter(version -> ! version.isAfter(oldestPlatform)) .filter(version -> ! controller.versionStatus().versions().stream() .map(VespaVersion::versionNumber) .collect(Collectors.toSet()).contains(version)) .max(Comparator.naturalOrder()) .orElseThrow(() -> new IllegalStateException("No available releases of " + controller.mavenRepository().artifactId()))); } private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) { Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName)); ZoneId zone = ZoneId.from(environment, region); Deployment deployment = instance.deployments().get(zone); if (deployment == null) { throw new NotExistsException(instance + " has no deployment in " + zone); } Inspector requestData = toSlime(request.getData()).get(); String reason = mandatory("reason", requestData).asString(); String agent = requireUserPrincipal(request).getName(); long timestamp = controller.clock().instant().getEpochSecond(); EndpointStatus.Status status = inService ? 
EndpointStatus.Status.in : EndpointStatus.Status.out; EndpointStatus endpointStatus = new EndpointStatus(status, reason, agent, timestamp); controller.applications().setGlobalRotationStatus(new DeploymentId(instance.id(), deployment.zone()), endpointStatus); return new MessageResponse(String.format("Successfully set %s in %s.%s %s service", instance.id().toShortString(), deployment.zone().environment().value(), deployment.zone().region().value(), inService ? "in" : "out of")); } private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region)); Slime slime = new Slime(); Cursor array = slime.setObject().setArray("globalrotationoverride"); Map<RoutingEndpoint, EndpointStatus> status = controller.applications().globalRotationStatus(deploymentId); for (RoutingEndpoint endpoint : status.keySet()) { EndpointStatus currentStatus = status.get(endpoint); array.addString(endpoint.upstreamName()); Cursor statusObject = array.addObject(); statusObject.setString("status", currentStatus.getStatus().name()); statusObject.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason()); statusObject.setString("agent", currentStatus.getAgent() == null ? 
"" : currentStatus.getAgent()); statusObject.setLong("timestamp", currentStatus.getEpoch()); } return new SlimeJsonResponse(slime); } private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = controller.applications().requireInstance(applicationId); ZoneId zone = ZoneId.from(environment, region); RotationId rotation = findRotationId(instance, endpointId); Deployment deployment = instance.deployments().get(zone); if (deployment == null) { throw new NotExistsException(instance + " has no deployment in " + zone); } Slime slime = new Slime(); Cursor response = slime.setObject(); toSlime(instance.rotationStatus().of(rotation, deployment), response); return new SlimeJsonResponse(slime); } private HttpResponse metering(String tenant, String application, HttpRequest request) { Slime slime = new Slime(); Cursor root = slime.setObject(); MeteringInfo meteringInfo = controller.serviceRegistry().meteringService().getResourceSnapshots(tenant, application); ResourceAllocation currentSnapshot = meteringInfo.getCurrentSnapshot(); Cursor currentRate = root.setObject("currentrate"); currentRate.setDouble("cpu", currentSnapshot.getCpuCores()); currentRate.setDouble("mem", currentSnapshot.getMemoryGb()); currentRate.setDouble("disk", currentSnapshot.getDiskGb()); ResourceAllocation thisMonth = meteringInfo.getThisMonth(); Cursor thismonth = root.setObject("thismonth"); thismonth.setDouble("cpu", thisMonth.getCpuCores()); thismonth.setDouble("mem", thisMonth.getMemoryGb()); thismonth.setDouble("disk", thisMonth.getDiskGb()); ResourceAllocation lastMonth = meteringInfo.getLastMonth(); Cursor lastmonth = root.setObject("lastmonth"); lastmonth.setDouble("cpu", lastMonth.getCpuCores()); lastmonth.setDouble("mem", lastMonth.getMemoryGb()); lastmonth.setDouble("disk", 
lastMonth.getDiskGb()); Map<ApplicationId, List<ResourceSnapshot>> history = meteringInfo.getSnapshotHistory(); Cursor details = root.setObject("details"); Cursor detailsCpu = details.setObject("cpu"); Cursor detailsMem = details.setObject("mem"); Cursor detailsDisk = details.setObject("disk"); history.entrySet().stream() .forEach(entry -> { String instanceName = entry.getKey().instance().value(); Cursor detailsCpuApp = detailsCpu.setObject(instanceName); Cursor detailsMemApp = detailsMem.setObject(instanceName); Cursor detailsDiskApp = detailsDisk.setObject(instanceName); Cursor detailsCpuData = detailsCpuApp.setArray("data"); Cursor detailsMemData = detailsMemApp.setArray("data"); Cursor detailsDiskData = detailsDiskApp.setArray("data"); entry.getValue().stream() .forEach(resourceSnapshot -> { Cursor cpu = detailsCpuData.addObject(); cpu.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli()); cpu.setDouble("value", resourceSnapshot.getCpuCores()); Cursor mem = detailsMemData.addObject(); mem.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli()); mem.setDouble("value", resourceSnapshot.getMemoryGb()); Cursor disk = detailsDiskData.addObject(); disk.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli()); disk.setDouble("value", resourceSnapshot.getDiskGb()); }); }); return new SlimeJsonResponse(slime); } private HttpResponse deploying(String tenant, String application, HttpRequest request) { Application app = controller.applications().requireApplication(TenantAndApplicationId.from(tenant, application)); Slime slime = new Slime(); Cursor root = slime.setObject(); if ( ! 
app.change().isEmpty()) { app.change().platform().ifPresent(version -> root.setString("platform", version.toString())); app.change().application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id())); root.setBool("pinned", app.change().isPinned()); } return new SlimeJsonResponse(slime); }

    /** Returns whether the given deployment is currently suspended. */
    private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region)); boolean suspended = controller.applications().isSuspended(deploymentId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setBool("suspended", suspended); return new SlimeJsonResponse(slime); }

    /** Lists the services of a deployment, fetched through the config server API of the zone. */
    private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region); ServiceApiResponse response = new ServiceApiResponse(ZoneId.from(environment, region), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)), request.getUri()); response.setResponse(applicationView); return response; }

    /** Proxies a request for a single service's API in a deployment, identified by service name and rest path. */
    private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) { Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath); ServiceApiResponse response = new ServiceApiResponse(ZoneId.from(environment, region), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)), request.getUri()); response.setResponse(result, serviceName, restPath); return response; }

    /** Creates a user tenant named after the authenticated Athenz user; returns a plain message (not an error) if the tenant already exists. */
    private HttpResponse createUser(HttpRequest request) { String user = Optional.of(requireUserPrincipal(request)) .filter(AthenzPrincipal.class::isInstance) .map(AthenzPrincipal.class::cast) .map(AthenzPrincipal::getIdentity) .filter(AthenzUser.class::isInstance) .map(AthenzIdentity::getName) .map(UserTenant::normalizeUser) .orElseThrow(() -> new ForbiddenException("Not authenticated or not a user.")); UserTenant tenant = UserTenant.create(user); try { controller.tenants().createUser(tenant); return new MessageResponse("Created user '" + user + "'"); } catch (AlreadyExistsException e) { return new MessageResponse("User '" + user + "' already exists"); } }

    /** Updates an existing tenant from the request body and returns its new state; throws NotExistsException when the tenant is unknown. */
    private HttpResponse updateTenant(String tenantName, HttpRequest request) { getTenantOrThrow(tenantName); TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().update(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); return tenant(controller.tenants().require(TenantName.from(tenantName)), request); }

    /** Creates a tenant from the request body and returns its state. */
    private HttpResponse createTenant(String tenantName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().create(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); return tenant(controller.tenants().require(TenantName.from(tenantName)), request); }

    /** Creates an application under the given tenant; credentials are skipped for user tenants. */
    private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Optional<Credentials> credentials = controller.tenants().require(id.tenant()).type() == Tenant.Type.user ? Optional.empty() : Optional.of(accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest())); Application application = controller.applications().createApplication(id, credentials); Slime slime = new Slime(); toSlime(id, slime.setObject(), request); return new SlimeJsonResponse(slime); }

    /** Creates an instance of the given application, creating the application first if it does not yet exist. */
    private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName); if (controller.applications().getApplication(applicationId).isEmpty()) createApplication(tenantName, applicationName, request); controller.applications().createInstance(applicationId.instance(instanceName)); Slime slime = new Slime(); toSlime(applicationId.instance(instanceName), slime.setObject(), request); return new SlimeJsonResponse(slime); }

    /** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". An empty version means the current system version. */
    private HttpResponse deployPlatform(String tenantName, String applicationName, boolean pin, HttpRequest request) { request = controller.auditLogger().log(request); String versionString = readToString(request.getData()); TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(id, application -> { Version version = Version.fromString(versionString); if (version.equals(Version.emptyVersion)) version = controller.systemVersion(); if ( ! systemHasVersion(version)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + controller.versionStatus().versions() .stream() .map(VespaVersion::versionNumber) .map(Version::toString) .collect(joining(", "))); Change change = Change.of(version); if (pin) change = change.withPin(); controller.applications().deploymentTrigger().forceChange(id, change); response.append("Triggered " + change + " for " + id); }); return new MessageResponse(response.toString()); }

    /** Trigger deployment to the last known application package for the given application. */
    private HttpResponse deployApplication(String tenantName, String applicationName, HttpRequest request) { controller.auditLogger().log(request); TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(id, application -> { Change change = Change.of(application.get().latestVersion().get()); controller.applications().deploymentTrigger().forceChange(id, change); response.append("Triggered " + change + " for " + id); }); return new MessageResponse(response.toString()); }

    /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
    private HttpResponse cancelDeploy(String tenantName, String applicationName, String choice) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(id, application -> { Change change = application.get().change(); if (change.isEmpty()) { response.append("No deployment in progress for " + application + " at this time"); return; } ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase()); /* NOTE(review): toUpperCase() is locale-sensitive; Locale.ROOT would be safer for enum lookup — confirm before changing. */ controller.applications().deploymentTrigger().cancelChange(id, cancel); response.append("Changed deployment from '" + change + "' to '" + controller.applications().requireApplication(id).change() + "' for " + application); }); return new MessageResponse(response.toString()); }

    /** Schedule restart of deployment, or specific host in a deployment */
    private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region)); Optional<Hostname> hostname = Optional.ofNullable(request.getProperty("hostname")).map(Hostname::new); controller.applications().restart(deploymentId, hostname); return new MessageResponse("Requested restart of " + deploymentId); }

    /** Starts a deployment run of the posted application package for the given job type; the multipart body must contain an 'applicationZip' part and may carry a 'deployOptions' part with a vespaVersion. */
    private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) { Map<String, byte[]> dataParts = parseDataParts(request); if ( ! dataParts.containsKey("applicationZip")) throw new IllegalArgumentException("Missing required form part 'applicationZip'"); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP)); controller.applications().verifyApplicationIdentityConfiguration(id.tenant(), applicationPackage, Optional.of(requireUserPrincipal(request))); Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("vespaVersion", options)) .map(Version::fromString); controller.jobController().deploy(id, type, version, applicationPackage); RunId runId = controller.jobController().last(id, type).get().id(); Slime slime = new Slime(); Cursor rootObject = slime.setObject(); rootObject.setString("message", "Deployment started in " + runId); rootObject.setLong("run", runId.number()); return new SlimeJsonResponse(slime); }

    /** Deploys an application package (or a referenced application version) to the given zone, with special handling of the proxy system application. */
    private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = ZoneId.from(environment, region); Map<String,
byte[]> dataParts = parseDataParts(request); if ( ! dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); /* * Special handling of the proxy application (the only system application with an application package) * Setting any other deployOptions here is not supported for now (e.g. specifying version), but * this might be handy later to handle emergency downgrades. */ boolean isZoneApplication = SystemApplication.proxy.id().equals(applicationId); if (isZoneApplication) { String versionStr = deployOptions.field("vespaVersion").asString(); boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null"); if (versionPresent) { throw new RuntimeException("Version not supported for system applications"); } if (controller.versionStatus().isUpgrading()) { throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed"); } Optional<VespaVersion> systemVersion = controller.versionStatus().systemVersion(); if (systemVersion.isEmpty()) { throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined"); } ActivateResult result = controller.applications() .deploySystemApplicationPackage(SystemApplication.proxy, zone, systemVersion.get().versionNumber()); return new SlimeJsonResponse(toSlime(result)); } /* * Normal applications from here */ Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip")) .map(ApplicationPackage::new); Optional<Application> application = controller.applications().getApplication(TenantAndApplicationId.from(applicationId)); Inspector sourceRevision = deployOptions.field("sourceRevision"); Inspector buildNumber = deployOptions.field("buildNumber"); if (sourceRevision.valid() != buildNumber.valid()) throw new IllegalArgumentException("Source revision and build number must both be provided, or not"); Optional<ApplicationVersion> applicationVersion = Optional.empty(); /* A (sourceRevision, buildNumber) pair means the package is fetched from storage rather than uploaded. */ if (sourceRevision.valid()) { if (applicationPackage.isPresent()) throw new IllegalArgumentException("Application version and application package can't both be provided."); applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision), buildNumber.asLong())); applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId, application.get().internal(), applicationVersion.get())); } boolean deployDirectly = deployOptions.field("deployDirectly").asBool(); Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new); /* Direct deploy with nothing specified redeploys whatever is currently deployed in the zone. */ if (deployDirectly && applicationPackage.isEmpty() && applicationVersion.isEmpty() && vespaVersion.isEmpty()) { Optional<Deployment> deployment = controller.applications().getInstance(applicationId) .map(Instance::deployments) .flatMap(deployments -> Optional.ofNullable(deployments.get(zone))); if(deployment.isEmpty()) throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist"); ApplicationVersion version = deployment.get().applicationVersion(); if(version.isUnknown()) throw new IllegalArgumentException("Can't redeploy application, application version is unknown"); applicationVersion = Optional.of(version); vespaVersion = Optional.of(deployment.get().version()); applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId, application.get().internal(), applicationVersion.get())); } DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly, vespaVersion, deployOptions.field("ignoreValidationErrors").asBool(), deployOptions.field("deployCurrentVersion").asBool()); applicationPackage.ifPresent(aPackage -> controller.applications().verifyApplicationIdentityConfiguration(applicationId.tenant(), aPackage, Optional.of(requireUserPrincipal(request)))); ActivateResult result = controller.applications().deploy(applicationId, zone, applicationPackage, applicationVersion, deployOptionsJsonClass); return new SlimeJsonResponse(toSlime(result)); }

    /** Deletes a tenant; user tenants take a separate deletion path and need no credentials. */
    private HttpResponse deleteTenant(String tenantName, HttpRequest request) { Optional<Tenant> tenant = controller.tenants().get(tenantName); if ( ! tenant.isPresent()) return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found"); if (tenant.get().type() == Tenant.Type.user) controller.tenants().deleteUser((UserTenant) tenant.get()); else controller.tenants().delete(tenant.get().name(), accessControlRequests.credentials(tenant.get().name(), toSlime(request.getData()).get(), request.getJDiscRequest())); return tenant(tenant.get(), request); }

    /** Deletes an application and all its instances; credentials are skipped for user tenants. */
    private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Optional<Credentials> credentials = controller.tenants().require(id.tenant()).type() == Tenant.Type.user ? Optional.empty() : Optional.of(accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest())); controller.applications().deleteApplication(id, credentials); return new MessageResponse("Deleted application " + id); }

    /** Deletes an instance, and the whole application as well when this was its last instance. */
    private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Optional<Credentials> credentials = controller.tenants().require(id.tenant()).type() == Tenant.Type.user ? Optional.empty() : Optional.of(accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest())); controller.applications().deleteInstance(id.instance(instanceName)); if (controller.applications().requireApplication(id).instances().isEmpty()) controller.applications().deleteApplication(id, credentials); return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString()); }

    /** Deactivates (removes) the deployment of an instance in the given zone. */
    private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName)); DeploymentId deploymentId = new DeploymentId(instance.id(), ZoneId.from(environment, region)); controller.applications().deactivate(deploymentId.applicationId(), deploymentId.zoneId()); return new MessageResponse("Deactivated " + deploymentId); }

    /** Receives a job completion report from an external build system; rejected for applications that have moved to internal deployment. */
    private HttpResponse notifyJobCompletion(String tenant, String application, HttpRequest request) { try { DeploymentJobs.JobReport report = toJobReport(tenant, application, toSlime(request.getData()).get()); if ( report.jobType() == JobType.component && controller.applications().requireApplication(TenantAndApplicationId.from(report.applicationId())).internal()) throw new IllegalArgumentException(report.applicationId() + " is set up to be deployed from internally, and no " + "longer accepts submissions from Screwdriver v3 jobs. If you need to revert " + "to the old pipeline, please file a ticket at yo/vespa-support and request this."); controller.applications().deploymentTrigger().notifyOfCompletion(report); return new MessageResponse("ok"); } catch (IllegalStateException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } }

    /** Returns the test configuration for the given job: cluster endpoints and content clusters of the relevant zones. */
    private HttpResponse testConfig(ApplicationId id, JobType type) { Set<ZoneId> zones = controller.jobController().testedZoneAndProductionZones(id, type); return new SlimeJsonResponse(testConfigSerializer.configSlime(id, type, false, controller.applications().clusterEndpoints(id, zones), controller.applications().contentClustersByZone(id, zones))); }

    /** Builds a JobReport from the posted JSON; component jobs additionally carry a project id and source revision. */
    private static DeploymentJobs.JobReport toJobReport(String tenantName, String applicationName, Inspector report) { Optional<DeploymentJobs.JobError> jobError = Optional.empty(); if (report.field("jobError").valid()) { jobError = Optional.of(DeploymentJobs.JobError.valueOf(report.field("jobError").asString())); } ApplicationId id = ApplicationId.from(tenantName, applicationName, report.field("instance").asString()); JobType type = JobType.fromJobName(report.field("jobName").asString()); long buildNumber = report.field("buildNumber").asLong(); if (type == JobType.component) return DeploymentJobs.JobReport.ofComponent(id, report.field("projectId").asLong(), buildNumber, jobError, toSourceRevision(report.field("sourceRevision"))); else return DeploymentJobs.JobReport.ofJob(id, type, buildNumber, jobError); }

    /** Parses a source revision object; all of "repository", "branch" and "commit" are required. */
    private static SourceRevision toSourceRevision(Inspector object) { if (!object.field("repository").valid() || !object.field("branch").valid() || !object.field("commit").valid()) { throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\"."); } return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(), object.field("commit").asString()); }

    /** Returns the tenant with the given name, or throws NotExistsException. */
    private Tenant getTenantOrThrow(String tenantName) { return
controller.tenants().get(tenantName) .orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); } private void toSlime(Cursor object, Tenant tenant, HttpRequest request) { object.setString("tenant", tenant.name().value()); object.setString("type", tenantType(tenant)); List<Application> applications = controller.applications().asList(tenant.name()); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; object.setString("athensDomain", athenzTenant.domain().getName()); object.setString("property", athenzTenant.property().id()); athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString())); athenzTenant.contact().ifPresent(c -> { object.setString("propertyUrl", c.propertyUrl().toString()); object.setString("contactsUrl", c.url().toString()); object.setString("issueCreationUrl", c.issueTrackerUrl().toString()); Cursor contactsArray = object.setArray("contacts"); c.persons().forEach(persons -> { Cursor personArray = contactsArray.addArray(); persons.forEach(personArray::addString); }); }); break; case user: break; case cloud: { CloudTenant cloudTenant = (CloudTenant) tenant; Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys"); cloudTenant.developerKeys().forEach((key, user) -> { Cursor keyObject = pemDeveloperKeysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", user.getName()); }); break; } default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } Cursor applicationArray = object.setArray("applications"); for (Application application : applications) for (Instance instance : application.instances().values()) if (recurseOverApplications(request)) toSlime(applicationArray.addObject(), instance, application, request); else toSlime(instance.id(), applicationArray.addObject(), request); } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", 
tenant.name().value()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; metaData.setString("athensDomain", athenzTenant.domain().getName()); metaData.setString("property", athenzTenant.property().id()); break; case user: break; case cloud: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString()); } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private void toSlime(JobStatus.JobRun jobRun, Cursor object) { object.setLong("id", jobRun.id()); object.setString("version", jobRun.platform().toFullString()); if (!jobRun.application().isUnknown()) toSlime(jobRun.application(), object.setObject("revision")); object.setString("reason", jobRun.reason()); object.setLong("at", jobRun.at().toEpochMilli()); } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private static Principal requireUserPrincipal(HttpRequest request) { Principal principal = request.getJDiscRequest().getUserPrincipal(); if (principal == null) throw new 
InternalServerErrorException("Expected a user principal"); return principal; }

    /** Returns the field with the given key, or throws IllegalArgumentException when it is missing. */
    private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); }

    /** Returns the string field with the given key, when present. */
    private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); }

    /** Joins the given elements with '/' into a path string. */
    private static String path(Object... elements) { return Joiner.on("/").join(elements); }

    /** Serializes a tenant-and-application id with its API URL. */
    private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value(), request.getUri()).toString()); }

    /** Serializes a full application instance id with its API URL. */
    private void toSlime(ApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("instance", id.instance().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value() + "/instance/" + id.instance().value(), request.getUri()).toString()); }

    /** Serializes the result of a deploy activation: revision, package size, prepare log and the config change actions (restart and refeed). */
    private Slime toSlime(ActivateResult result) { Slime slime = new Slime(); Cursor object = slime.setObject(); object.setString("revisionId", result.revisionId().id()); object.setLong("applicationZipSize", result.applicationZipSizeBytes()); Cursor logArray = object.setArray("prepareMessages"); if (result.prepareResponse().log != null) { for (Log logMessage : result.prepareResponse().log) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.time); logObject.setString("level", logMessage.level); logObject.setString("message", logMessage.message); } } Cursor changeObject = object.setObject("configChangeActions"); Cursor restartActionsArray = changeObject.setArray("restart"); for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) { Cursor restartActionObject = restartActionsArray.addObject(); restartActionObject.setString("clusterName", restartAction.clusterName); restartActionObject.setString("clusterType", restartAction.clusterType); restartActionObject.setString("serviceType", restartAction.serviceType); serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services")); stringsToSlime(restartAction.messages, restartActionObject.setArray("messages")); } Cursor refeedActionsArray = changeObject.setArray("refeed"); for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) { Cursor refeedActionObject = refeedActionsArray.addObject(); refeedActionObject.setString("name", refeedAction.name); refeedActionObject.setBool("allowed", refeedAction.allowed); refeedActionObject.setString("documentType", refeedAction.documentType); refeedActionObject.setString("clusterName", refeedAction.clusterName); serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services")); stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages")); } return slime; }

    /** Serializes each service info as an object with name, type, config id and host. */
    private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) { for (ServiceInfo serviceInfo : serviceInfoList) { Cursor serviceInfoObject = array.addObject(); serviceInfoObject.setString("serviceName", serviceInfo.serviceName); serviceInfoObject.setString("serviceType", serviceInfo.serviceType); serviceInfoObject.setString("configId", serviceInfo.configId); serviceInfoObject.setString("hostName", serviceInfo.hostName); } }

    /** Adds each string to the given array. */
    private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); }

    /** Reads the whole stream into a string, or null when it is empty. NOTE(review): the Scanner uses the platform default charset and is never closed — consider an explicit UTF-8 charset; left unchanged here to avoid a behavior change. */
    private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! scanner.hasNext()) return null; return scanner.next(); }

    /** Returns whether the given version is among the versions active in this system. */
    private boolean systemHasVersion(Version version) { return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version)); }

    /** Serializes a deployment's cost summary and its per-cluster breakdown. */
    public static void toSlime(DeploymentCost deploymentCost, Cursor object) { object.setLong("tco", (long)deploymentCost.getTco()); object.setLong("waste", (long)deploymentCost.getWaste()); object.setDouble("utilization", deploymentCost.getUtilization()); Cursor clustersObject = object.setObject("cluster"); for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet()) toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey())); }

    /** Serializes one cluster's cost: size, dominant resource, flavor data, result utilization ("util"), system utilization ("usage") and hostnames. */
    private static void toSlime(ClusterCost clusterCost, Cursor object) { object.setLong("count", clusterCost.getClusterInfo().getHostnames().size()); object.setString("resource", getResourceName(clusterCost.getResultUtilization())); object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization()); object.setLong("tco", (int)clusterCost.getTco()); object.setLong("waste", (int)clusterCost.getWaste()); object.setString("flavor", clusterCost.getClusterInfo().getFlavor()); object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost()); object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU()); object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem()); object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk()); object.setString("type", clusterCost.getClusterInfo().getClusterType().name()); Cursor utilObject = object.setObject("util"); utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu()); utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory()); utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk()); utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy()); Cursor usageObject = object.setObject("usage"); usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu()); usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory()); usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk()); usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy()); Cursor hostnamesArray = object.setArray("hostnames"); for (String hostname : clusterCost.getClusterInfo().getHostnames()) hostnamesArray.addString(hostname); }

    /** Returns the name of the resource with the highest utilization; defaults to "cpu". */
    private static String getResourceName(ClusterUtilization utilization) { String name = "cpu"; double max = utilization.getMaxUtilization(); if (utilization.getMemory() == max) { name = "mem"; } else if (utilization.getDisk() == max) { name = "disk"; } else if (utilization.getDiskBusy() == max) { name = "diskbusy"; } return name; }

    /** Returns whether the 'recursive' request property asks for recursion over tenants (or deeper). */
    private static boolean recurseOverTenants(HttpRequest request) { return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive")); }

    /** Returns whether the 'recursive' request property asks for recursion over applications (or deeper). */
    private static boolean recurseOverApplications(HttpRequest request) { return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive")); }

    /** Returns whether the 'recursive' request property asks for recursion over deployments. */
    private static boolean recurseOverDeployments(HttpRequest request) { return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive")); }

    /** Returns the API name of the given tenant's type. */
    private static String tenantType(Tenant tenant) { switch (tenant.type()) { case user: return "USER"; case athenz: return "ATHENS"; case cloud: return "CLOUD"; default: throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName()); } }

    /** Extracts the application instance id from the request path. */
    private static ApplicationId appIdFromPath(Path path) { return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance")); }

    /** Extracts the job type from the request path. */
    private static JobType jobTypeFromPath(Path path) { return JobType.fromJobName(path.get("jobtype")); }

    /** Extracts a run id (application, job type, run number) from the request path. */
    private static RunId runIdFromPath(Path path) { long number = Long.parseLong(path.get("number")); return new RunId(appIdFromPath(path), jobTypeFromPath(path), number); }
private HttpResponse submit(String tenant, String application, HttpRequest request) { Map<String, byte[]> dataParts = parseDataParts(request); Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get(); SourceRevision sourceRevision = toSourceRevision(submitOptions); String authorEmail = submitOptions.field("authorEmail").asString(); long projectId = Math.max(1, submitOptions.field("projectId").asLong()); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP)); if (DeploymentSpec.empty.equals(applicationPackage.deploymentSpec())) throw new IllegalArgumentException("Missing required file 'deployment.xml'"); controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant), applicationPackage, Optional.of(requireUserPrincipal(request))); return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), tenant, application, sourceRevision, authorEmail, projectId, applicationPackage, dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP)); } private static Map<String, byte[]> parseDataParts(HttpRequest request) { String contentHash = request.getHeader("x-Content-Hash"); if (contentHash == null) return new MultipartParser().parse(request); DigestInputStream digester = Signatures.sha256Digester(request.getData()); var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri()); if ( ! 
Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash))) throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash"); return dataParts; } private static RotationId findRotationId(Instance instance, Optional<String> endpointId) { if (instance.rotations().isEmpty()) { throw new NotExistsException("global rotation does not exist for " + instance); } if (endpointId.isPresent()) { return instance.rotations().stream() .filter(r -> r.endpointId().id().equals(endpointId.get())) .map(AssignedRotation::rotationId) .findFirst() .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() + " does not exist for " + instance)); } else if (instance.rotations().size() > 1) { throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given"); } return instance.rotations().get(0).rotationId(); } private static String rotationStateString(RotationState state) { switch (state) { case in: return "IN"; case out: return "OUT"; } return "UNKNOWN"; } }
It doesn't matter here ...
/**
 * Writes the fields of the given allocation into the given Slime cursor.
 * Field order is part of the serialized form and is therefore kept stable.
 */
private void toSlime(Allocation allocation, Cursor cursor) {
    cursor.setString(tenantIdKey, allocation.owner().tenant().value());
    cursor.setString(applicationIdKey, allocation.owner().application().value());
    cursor.setString(instanceIdKey, allocation.owner().instance().value());
    cursor.setString(serviceIdKey, allocation.membership().stringValue());
    cursor.setLong(restartGenerationKey, allocation.restartGeneration().wanted());
    cursor.setLong(currentRestartGenerationKey, allocation.restartGeneration().current());
    cursor.setBool(removableKey, allocation.isRemovable());
    // wantedVespaVersion is only written when the cluster membership carries a version
    allocation.membership().cluster().vespaVersion()
              .ifPresent(v -> cursor.setString(wantedVespaVersionKey, v.toString()));
}
.ifPresent(version -> object.setString(wantedVespaVersionKey, version.toString()));
/**
 * Serializes this allocation into the given Slime object.
 * The write order matches the stored serialized form, so it must not be changed.
 */
private void toSlime(Allocation allocation, Cursor target) {
    target.setString(tenantIdKey, allocation.owner().tenant().value());
    target.setString(applicationIdKey, allocation.owner().application().value());
    target.setString(instanceIdKey, allocation.owner().instance().value());
    target.setString(serviceIdKey, allocation.membership().stringValue());
    target.setLong(restartGenerationKey, allocation.restartGeneration().wanted());
    target.setLong(currentRestartGenerationKey, allocation.restartGeneration().current());
    target.setBool(removableKey, allocation.isRemovable());
    // Optional: written only if the cluster membership specifies a Vespa version
    allocation.membership().cluster().vespaVersion()
              .ifPresent(version -> target.setString(wantedVespaVersionKey, version.toString()));
}
class NodeSerializer { /** The configured node flavors */ private final NodeFlavors flavors; private static final String hostnameKey = "hostname"; private static final String ipAddressesKey = "ipAddresses"; private static final String openStackIdKey = "openStackId"; private static final String parentHostnameKey = "parentHostname"; private static final String historyKey = "history"; private static final String instanceKey = "instance"; private static final String rebootGenerationKey = "rebootGeneration"; private static final String currentRebootGenerationKey = "currentRebootGeneration"; private static final String vespaVersionKey = "vespaVersion"; private static final String hostedVersionKey = "hostedVersion"; private static final String stateVersionKey = "stateVersion"; private static final String failCountKey = "failCount"; private static final String hardwareFailureKey = "hardwareFailure"; private static final String nodeTypeKey = "type"; private static final String wantToRetireKey = "wantToRetire"; private static final String flavorKey = "flavor"; private static final String tenantIdKey = "tenantId"; private static final String applicationIdKey = "applicationId"; private static final String instanceIdKey = "instanceId"; private static final String serviceIdKey = "serviceId"; private static final String restartGenerationKey = "restartGeneration"; private static final String currentRestartGenerationKey = "currentRestartGeneration"; private static final String removableKey = "removable"; private static final String dockerImageKey = "dockerImage"; private static final String wantedVespaVersionKey = "wantedVespaVersion"; private static final String historyEventTypeKey = "type"; private static final String atKey = "at"; private static final String agentKey = "agent"; public NodeSerializer(NodeFlavors flavors) { this.flavors = flavors; } public byte[] toJson(Node node) { try { Slime slime = new Slime(); toSlime(node, slime.setObject()); return 
SlimeUtils.toJsonBytes(slime); } catch (IOException e) { throw new RuntimeException("Serialization of " + node + " to json failed", e); } } private void toSlime(Node node, Cursor object) { object.setString(hostnameKey, node.hostname()); toSlime(node.ipAddresses(), object.setArray(ipAddressesKey)); object.setString(openStackIdKey, node.openStackId()); node.parentHostname().ifPresent(hostname -> object.setString(parentHostnameKey, hostname)); object.setString(flavorKey, node.flavor().name()); object.setLong(rebootGenerationKey, node.status().reboot().wanted()); object.setLong(currentRebootGenerationKey, node.status().reboot().current()); node.status().vespaVersion().ifPresent(version -> object.setString(vespaVersionKey, version.toString())); node.status().hostedVersion().ifPresent(version -> object.setString(hostedVersionKey, version.toString())); node.status().stateVersion().ifPresent(version -> object.setString(stateVersionKey, version)); node.status().dockerImage().ifPresent(image -> object.setString(dockerImageKey, image)); object.setLong(failCountKey, node.status().failCount()); node.status().hardwareFailure().ifPresent(failure -> object.setString(hardwareFailureKey, toString(failure))); object.setBool(wantToRetireKey, node.status().wantToRetire()); node.allocation().ifPresent(allocation -> toSlime(allocation, object.setObject(instanceKey))); toSlime(node.history(), object.setArray(historyKey)); object.setString(nodeTypeKey, toString(node.type())); } private void toSlime(History history, Cursor array) { for (History.Event event : history.events()) toSlime(event, array.addObject()); } private void toSlime(History.Event event, Cursor object) { object.setString(historyEventTypeKey, toString(event.type())); object.setLong(atKey, event.at().toEpochMilli()); object.setString(agentKey, toString(event.agent())); } private void toSlime(Set<String> ipAddresses, Cursor array) { ipAddresses.forEach(array::addString); } public Node fromJson(Node.State state, byte[] data) { 
return nodeFromSlime(state, SlimeUtils.jsonToSlime(data).get()); } private Node nodeFromSlime(Node.State state, Inspector object) { return new Node(object.field(openStackIdKey).asString(), ipAddressesFromSlime(object), object.field(hostnameKey).asString(), parentHostnameFromSlime(object), flavorFromSlime(object), statusFromSlime(object), state, allocationFromSlime(object.field(instanceKey)), historyFromSlime(object.field(historyKey)), nodeTypeFromString(object.field(nodeTypeKey).asString())); } private Status statusFromSlime(Inspector object) { return new Status(generationFromSlime(object, rebootGenerationKey, currentRebootGenerationKey), softwareVersionFromSlime(object.field(vespaVersionKey)), softwareVersionFromSlime(object.field(hostedVersionKey)), optionalString(object.field(stateVersionKey)), optionalString(object.field(dockerImageKey)), (int)object.field(failCountKey).asLong(), hardwareFailureFromSlime(object.field(hardwareFailureKey)), object.field(wantToRetireKey).asBool()); } private Flavor flavorFromSlime(Inspector object) { return flavors.getFlavorOrThrow(object.field(flavorKey).asString()); } private Optional<Allocation> allocationFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(new Allocation(applicationIdFromSlime(object), clusterMembershipFromSlime(object), generationFromSlime(object, restartGenerationKey, currentRestartGenerationKey), object.field(removableKey).asBool())); } private ApplicationId applicationIdFromSlime(Inspector object) { return ApplicationId.from(TenantName.from(object.field(tenantIdKey).asString()), ApplicationName.from(object.field(applicationIdKey).asString()), InstanceName.from(object.field(instanceIdKey).asString())); } private History historyFromSlime(Inspector array) { List<History.Event> events = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> { History.Event event = eventFromSlime(item); if (event != null) events.add(event); }); return new History(events); } private History.Event eventFromSlime(Inspector object) { History.Event.Type type = eventTypeFromString(object.field(historyEventTypeKey).asString()); if (type == null) return null; Instant at = Instant.ofEpochMilli(object.field(atKey).asLong()); Agent agent = eventAgentFromSlime(object.field(agentKey)); return new History.Event(type, agent, at); } private Generation generationFromSlime(Inspector object, String wantedField, String currentField) { Inspector current = object.field(currentField); return new Generation(object.field(wantedField).asLong(), current.asLong()); } private ClusterMembership clusterMembershipFromSlime(Inspector object) { Optional<Version> vespaVersion; if (object.field(dockerImageKey).valid()) { vespaVersion = optionalString(object.field(dockerImageKey)) .map(DockerImage::new) .map(DockerImage::tagAsVersion); } else { vespaVersion = softwareVersionFromSlime(object.field(wantedVespaVersionKey)); } return ClusterMembership.fromVersion(object.field(serviceIdKey).asString(), vespaVersion); } private Optional<Version> softwareVersionFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(Version.fromString(object.asString())); } private Optional<String> parentHostnameFromSlime(Inspector object) { if (object.field(parentHostnameKey).valid()) return Optional.of(object.field(parentHostnameKey).asString()); else return Optional.empty(); } private Set<String> ipAddressesFromSlime(Inspector object) { ImmutableSet.Builder<String> ipAddresses = ImmutableSet.builder(); object.field(ipAddressesKey).traverse((ArrayTraverser) (i, item) -> ipAddresses.add(item.asString())); return ipAddresses.build(); } private Optional<Status.HardwareFailureType> hardwareFailureFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); return Optional.of(hardwareFailureFromString(object.asString())); } /** Returns the event type, or null if this event type should be ignored */ private History.Event.Type eventTypeFromString(String eventTypeString) { switch (eventTypeString) { case "readied" : return History.Event.Type.readied; case "reserved" : return History.Event.Type.reserved; case "activated" : return History.Event.Type.activated; case "retired" : return History.Event.Type.retired; case "deactivated" : return History.Event.Type.deactivated; case "failed" : return History.Event.Type.failed; case "deallocated" : return History.Event.Type.deallocated; case "down" : return History.Event.Type.down; case "requested" : return History.Event.Type.requested; case "rebooted" : return History.Event.Type.rebooted; } throw new IllegalArgumentException("Unknown node event type '" + eventTypeString + "'"); } private String toString(History.Event.Type nodeEventType) { switch (nodeEventType) { case readied : return "readied"; case reserved : return "reserved"; case activated : return "activated"; case retired : return "retired"; case deactivated : return "deactivated"; case failed : return "failed"; case deallocated : return "deallocated"; case down : return "down"; case requested: return "requested"; case 
rebooted: return "rebooted"; } throw new IllegalArgumentException("Serialized form of '" + nodeEventType + "' not defined"); } private Agent eventAgentFromSlime(Inspector eventAgentField) { if ( ! eventAgentField.valid()) return Agent.system; switch (eventAgentField.asString()) { case "application" : return Agent.application; case "system" : return Agent.system; case "operator" : return Agent.operator; } throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'"); } private String toString(Agent agent) { switch (agent) { case application : return "application"; case system : return "system"; case operator : return "operator"; } throw new IllegalArgumentException("Serialized form of '" + agent + "' not defined"); } private NodeType nodeTypeFromString(String typeString) { switch (typeString) { case "tenant" : return NodeType.tenant; case "host" : return NodeType.host; case "proxy" : return NodeType.proxy; default : throw new IllegalArgumentException("Unknown node type '" + typeString + "'"); } } private String toString(NodeType type) { switch (type) { case tenant: return "tenant"; case host: return "host"; case proxy: return "proxy"; } throw new IllegalArgumentException("Serialized form of '" + type + "' not defined"); } private Status.HardwareFailureType hardwareFailureFromString(String hardwareFailureString) { switch (hardwareFailureString) { case "memory_mcelog" : return Status.HardwareFailureType.memory_mcelog; case "disk_smart" : return Status.HardwareFailureType.disk_smart; case "disk_kernel" : return Status.HardwareFailureType.disk_kernel; case "unknown" : return Status.HardwareFailureType.unknown; default : throw new IllegalArgumentException("Unknown hardware failure '" + hardwareFailureString + "'"); } } private String toString(Status.HardwareFailureType type) { switch (type) { case memory_mcelog: return "memory_mcelog"; case disk_smart: return "disk_smart"; case disk_kernel: return "disk_kernel"; case unknown: return 
"unknown"; default : throw new IllegalArgumentException("Serialized form of '" + type + " not defined"); } } }
class NodeSerializer { /** The configured node flavors */ private final NodeFlavors flavors; private static final String hostnameKey = "hostname"; private static final String ipAddressesKey = "ipAddresses"; private static final String openStackIdKey = "openStackId"; private static final String parentHostnameKey = "parentHostname"; private static final String historyKey = "history"; private static final String instanceKey = "instance"; private static final String rebootGenerationKey = "rebootGeneration"; private static final String currentRebootGenerationKey = "currentRebootGeneration"; private static final String vespaVersionKey = "vespaVersion"; private static final String hostedVersionKey = "hostedVersion"; private static final String stateVersionKey = "stateVersion"; private static final String failCountKey = "failCount"; private static final String hardwareFailureKey = "hardwareFailure"; private static final String nodeTypeKey = "type"; private static final String wantToRetireKey = "wantToRetire"; private static final String flavorKey = "flavor"; private static final String tenantIdKey = "tenantId"; private static final String applicationIdKey = "applicationId"; private static final String instanceIdKey = "instanceId"; private static final String serviceIdKey = "serviceId"; private static final String restartGenerationKey = "restartGeneration"; private static final String currentRestartGenerationKey = "currentRestartGeneration"; private static final String removableKey = "removable"; private static final String dockerImageKey = "dockerImage"; private static final String wantedVespaVersionKey = "wantedVespaVersion"; private static final String historyEventTypeKey = "type"; private static final String atKey = "at"; private static final String agentKey = "agent"; public NodeSerializer(NodeFlavors flavors) { this.flavors = flavors; } public byte[] toJson(Node node) { try { Slime slime = new Slime(); toSlime(node, slime.setObject()); return 
SlimeUtils.toJsonBytes(slime); } catch (IOException e) { throw new RuntimeException("Serialization of " + node + " to json failed", e); } } private void toSlime(Node node, Cursor object) { object.setString(hostnameKey, node.hostname()); toSlime(node.ipAddresses(), object.setArray(ipAddressesKey)); object.setString(openStackIdKey, node.openStackId()); node.parentHostname().ifPresent(hostname -> object.setString(parentHostnameKey, hostname)); object.setString(flavorKey, node.flavor().name()); object.setLong(rebootGenerationKey, node.status().reboot().wanted()); object.setLong(currentRebootGenerationKey, node.status().reboot().current()); node.status().vespaVersion().ifPresent(version -> object.setString(vespaVersionKey, version.toString())); node.status().hostedVersion().ifPresent(version -> object.setString(hostedVersionKey, version.toString())); node.status().stateVersion().ifPresent(version -> object.setString(stateVersionKey, version)); node.status().dockerImage().ifPresent(image -> object.setString(dockerImageKey, image)); object.setLong(failCountKey, node.status().failCount()); node.status().hardwareFailure().ifPresent(failure -> object.setString(hardwareFailureKey, toString(failure))); object.setBool(wantToRetireKey, node.status().wantToRetire()); node.allocation().ifPresent(allocation -> toSlime(allocation, object.setObject(instanceKey))); toSlime(node.history(), object.setArray(historyKey)); object.setString(nodeTypeKey, toString(node.type())); } private void toSlime(History history, Cursor array) { for (History.Event event : history.events()) toSlime(event, array.addObject()); } private void toSlime(History.Event event, Cursor object) { object.setString(historyEventTypeKey, toString(event.type())); object.setLong(atKey, event.at().toEpochMilli()); object.setString(agentKey, toString(event.agent())); } private void toSlime(Set<String> ipAddresses, Cursor array) { ipAddresses.forEach(array::addString); } public Node fromJson(Node.State state, byte[] data) { 
return nodeFromSlime(state, SlimeUtils.jsonToSlime(data).get()); } private Node nodeFromSlime(Node.State state, Inspector object) { return new Node(object.field(openStackIdKey).asString(), ipAddressesFromSlime(object), object.field(hostnameKey).asString(), parentHostnameFromSlime(object), flavorFromSlime(object), statusFromSlime(object), state, allocationFromSlime(object.field(instanceKey)), historyFromSlime(object.field(historyKey)), nodeTypeFromString(object.field(nodeTypeKey).asString())); } private Status statusFromSlime(Inspector object) { return new Status(generationFromSlime(object, rebootGenerationKey, currentRebootGenerationKey), softwareVersionFromSlime(object.field(vespaVersionKey)), softwareVersionFromSlime(object.field(hostedVersionKey)), optionalString(object.field(stateVersionKey)), optionalString(object.field(dockerImageKey)), (int)object.field(failCountKey).asLong(), hardwareFailureFromSlime(object.field(hardwareFailureKey)), object.field(wantToRetireKey).asBool()); } private Flavor flavorFromSlime(Inspector object) { return flavors.getFlavorOrThrow(object.field(flavorKey).asString()); } private Optional<Allocation> allocationFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(new Allocation(applicationIdFromSlime(object), clusterMembershipFromSlime(object), generationFromSlime(object, restartGenerationKey, currentRestartGenerationKey), object.field(removableKey).asBool())); } private ApplicationId applicationIdFromSlime(Inspector object) { return ApplicationId.from(TenantName.from(object.field(tenantIdKey).asString()), ApplicationName.from(object.field(applicationIdKey).asString()), InstanceName.from(object.field(instanceIdKey).asString())); } private History historyFromSlime(Inspector array) { List<History.Event> events = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> { History.Event event = eventFromSlime(item); if (event != null) events.add(event); }); return new History(events); } private History.Event eventFromSlime(Inspector object) { History.Event.Type type = eventTypeFromString(object.field(historyEventTypeKey).asString()); if (type == null) return null; Instant at = Instant.ofEpochMilli(object.field(atKey).asLong()); Agent agent = eventAgentFromSlime(object.field(agentKey)); return new History.Event(type, agent, at); } private Generation generationFromSlime(Inspector object, String wantedField, String currentField) { Inspector current = object.field(currentField); return new Generation(object.field(wantedField).asLong(), current.asLong()); } private ClusterMembership clusterMembershipFromSlime(Inspector object) { Optional<Version> vespaVersion; if (object.field(dockerImageKey).valid()) { vespaVersion = optionalString(object.field(dockerImageKey)) .map(DockerImage::new) .map(DockerImage::tagAsVersion); } else { vespaVersion = softwareVersionFromSlime(object.field(wantedVespaVersionKey)); } return ClusterMembership.fromVersion(object.field(serviceIdKey).asString(), vespaVersion); } private Optional<Version> softwareVersionFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(Version.fromString(object.asString())); } private Optional<String> parentHostnameFromSlime(Inspector object) { if (object.field(parentHostnameKey).valid()) return Optional.of(object.field(parentHostnameKey).asString()); else return Optional.empty(); } private Set<String> ipAddressesFromSlime(Inspector object) { ImmutableSet.Builder<String> ipAddresses = ImmutableSet.builder(); object.field(ipAddressesKey).traverse((ArrayTraverser) (i, item) -> ipAddresses.add(item.asString())); return ipAddresses.build(); } private Optional<Status.HardwareFailureType> hardwareFailureFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); return Optional.of(hardwareFailureFromString(object.asString())); } /** Returns the event type, or null if this event type should be ignored */ private History.Event.Type eventTypeFromString(String eventTypeString) { switch (eventTypeString) { case "readied" : return History.Event.Type.readied; case "reserved" : return History.Event.Type.reserved; case "activated" : return History.Event.Type.activated; case "retired" : return History.Event.Type.retired; case "deactivated" : return History.Event.Type.deactivated; case "failed" : return History.Event.Type.failed; case "deallocated" : return History.Event.Type.deallocated; case "down" : return History.Event.Type.down; case "requested" : return History.Event.Type.requested; case "rebooted" : return History.Event.Type.rebooted; } throw new IllegalArgumentException("Unknown node event type '" + eventTypeString + "'"); } private String toString(History.Event.Type nodeEventType) { switch (nodeEventType) { case readied : return "readied"; case reserved : return "reserved"; case activated : return "activated"; case retired : return "retired"; case deactivated : return "deactivated"; case failed : return "failed"; case deallocated : return "deallocated"; case down : return "down"; case requested: return "requested"; case 
rebooted: return "rebooted"; } throw new IllegalArgumentException("Serialized form of '" + nodeEventType + "' not defined"); } private Agent eventAgentFromSlime(Inspector eventAgentField) { if ( ! eventAgentField.valid()) return Agent.system; switch (eventAgentField.asString()) { case "application" : return Agent.application; case "system" : return Agent.system; case "operator" : return Agent.operator; } throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'"); } private String toString(Agent agent) { switch (agent) { case application : return "application"; case system : return "system"; case operator : return "operator"; } throw new IllegalArgumentException("Serialized form of '" + agent + "' not defined"); } private NodeType nodeTypeFromString(String typeString) { switch (typeString) { case "tenant" : return NodeType.tenant; case "host" : return NodeType.host; case "proxy" : return NodeType.proxy; default : throw new IllegalArgumentException("Unknown node type '" + typeString + "'"); } } private String toString(NodeType type) { switch (type) { case tenant: return "tenant"; case host: return "host"; case proxy: return "proxy"; } throw new IllegalArgumentException("Serialized form of '" + type + "' not defined"); } private Status.HardwareFailureType hardwareFailureFromString(String hardwareFailureString) { switch (hardwareFailureString) { case "memory_mcelog" : return Status.HardwareFailureType.memory_mcelog; case "disk_smart" : return Status.HardwareFailureType.disk_smart; case "disk_kernel" : return Status.HardwareFailureType.disk_kernel; case "unknown" : return Status.HardwareFailureType.unknown; default : throw new IllegalArgumentException("Unknown hardware failure '" + hardwareFailureString + "'"); } } private String toString(Status.HardwareFailureType type) { switch (type) { case memory_mcelog: return "memory_mcelog"; case disk_smart: return "disk_smart"; case disk_kernel: return "disk_kernel"; case unknown: return 
"unknown"; default : throw new IllegalArgumentException("Serialized form of '" + type + " not defined"); } } }
The problem is that every state transition has a reason and an `Agent`, but only `NodeRetirer` has its own `Agent`, so this wouldn't improve the log message for the other 99% of transitions, would it?
/**
 * Parks up to {@code limit} of the given nodes, attributing the action to the NodeRetirer agent.
 *
 * @param nodesToPark nodes we would like to park
 * @param limit       maximum number of nodes to park
 * @return true iff all of {@code nodesToPark} fit within the limit and were parked
 */
boolean limitedPark(Set<Node> nodesToPark, long limit) {
    // Hoist the reason string: it is identical for every node parked in this pass.
    String reason = "Parked by NodeRetirer, Policy: " + retirementPolicy.getClass().getSimpleName();
    long parked = 0;
    for (Node node : nodesToPark) {
        if (parked >= limit) break;
        nodeRepository().park(node.hostname(), Agent.NodeRetirer, reason);
        parked++;
    }
    return nodesToPark.size() <= limit;
}
.forEach(node -> nodeRepository().park(node.hostname(), Agent.NodeRetirer, "Parked by NodeRetirer, Policy: " + retirementPolicy.getClass().getSimpleName()));
/**
 * Parks at most {@code limit} of the given nodes on behalf of the NodeRetirer agent.
 *
 * @param nodesToPark nodes we would like to park
 * @param limit       maximum number of nodes to park
 * @return true iff every node in {@code nodesToPark} was parked (the set fit within the limit)
 */
boolean limitedPark(Set<Node> nodesToPark, long limit) {
    // The reason string is the same for all nodes in this pass, so build it once.
    String reason = "Policy: " + retirementPolicy.getClass().getSimpleName();
    long parked = 0;
    for (Node node : nodesToPark) {
        if (parked >= limit) break;
        nodeRepository().park(node.hostname(), Agent.NodeRetirer, reason);
        parked++;
    }
    return nodesToPark.size() <= limit;
}
/**
 * Maintainer that parks ready nodes which the configured {@link RetirementPolicy} says should be
 * retired, while keeping a safety buffer of spare ready nodes per flavor.
 *
 * NOTE(review): limitedPark, called from retireUnallocated, is defined elsewhere in this class
 * and is not visible in this excerpt.
 */
class NodeRetirer extends Maintainer {

    private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());

    // Decides which nodes are eligible for retirement
    private final RetirementPolicy retirementPolicy;

    /**
     * @param applies the zones this maintainer is allowed to run in; if the current zone is not
     *                among them, the maintainer deconstructs itself immediately.
     */
    public NodeRetirer(NodeRepository nodeRepository, Zone zone, Duration interval, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) {
        super(nodeRepository, interval, jobControl);
        if (! Arrays.asList(applies).contains(zone)) {
            String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", "));
            log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping.");
            deconstruct();
        }
        this.retirementPolicy = retirementPolicy;
    }

    @Override
    protected void maintain() {
        retireUnallocated();
    }

    /**
     * Parks ready nodes that the retirement policy says should be retired, per flavor, limited by
     * the number of spare ready nodes of that flavor.
     *
     * @return true iff all flavors managed to park every node that should be retired
     */
    boolean retireUnallocated() {
        try (Mutex lock = nodeRepository().lockUnallocated()) {
            List<Node> allNodes = nodeRepository().getNodes();
            Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes);
            // Group retirement candidates by flavor, then count the flavors where we could not
            // park all candidates within that flavor's spare-node budget.
            long numFlavorsWithUnsuccessfullyRetiredNodes =
                    allNodes.stream()
                            .filter(node -> node.state() == Node.State.ready)
                            .filter(retirementPolicy::shouldRetire)
                            .collect(Collectors.groupingBy(
                                    Node::flavor,
                                    Collectors.toSet()))
                            .entrySet().stream()
                            .filter(entry -> {
                                Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
                                long numSpareReadyNodesForFlavor = numSpareNodesByFlavor.get(entry.getKey());
                                return !limitedPark(nodesThatShouldBeRetiredForFlavor, numSpareReadyNodesForFlavor);
                            }).count();
            return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
        }
    }

    /**
     * Returns, per flavor, the number of ready nodes that can be spared (i.e. parked) while
     * keeping a safety buffer relative to the number of active nodes of that flavor.
     */
    Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) {
        Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream()
                .filter(node -> node.state() == Node.State.active)
                .collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));

        return allNodes.stream()
                .filter(node -> node.state() == Node.State.ready)
                .collect(Collectors.groupingBy(Node::flavor, Collectors.counting()))
                .entrySet().stream()
                .collect(Collectors.toMap(
                        Map.Entry::getKey,
                        entry -> {
                            long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L);
                            return getNumSpareNodes(numActiveNodesByCurrentFlavor, entry.getValue());
                        }));
    }

    /**
     * Returns number of ready nodes to spare (beyond a safety buffer) for a flavor given its number of active
     * and ready nodes. The buffer is 10% of the active nodes, rounded up.
     */
    long getNumSpareNodes(long numActiveNodes, long numReadyNodes) {
        long numNodesToSpare = (long) Math.ceil(0.1 * numActiveNodes);
        return Math.max(0L, numReadyNodes - numNodesToSpare);
    }
}
class NodeRetirer extends Maintainer { private static final Logger log = Logger.getLogger(NodeRetirer.class.getName()); private final RetirementPolicy retirementPolicy; public NodeRetirer(NodeRepository nodeRepository, Zone zone, Duration interval, JobControl jobControl, RetirementPolicy retirementPolicy, Zone... applies) { super(nodeRepository, interval, jobControl); if (! Arrays.asList(applies).contains(zone)) { String targetZones = Arrays.stream(applies).map(Zone::toString).collect(Collectors.joining(", ")); log.info("NodeRetirer should only run in " + targetZones + " and not in " + zone + ", stopping."); deconstruct(); } this.retirementPolicy = retirementPolicy; } @Override protected void maintain() { retireUnallocated(); } boolean retireUnallocated() { try (Mutex lock = nodeRepository().lockUnallocated()) { List<Node> allNodes = nodeRepository().getNodes(); Map<Flavor, Long> numSpareNodesByFlavor = getNumberSpareReadyNodesByFlavor(allNodes); long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream() .filter(node -> node.state() == Node.State.ready) .filter(retirementPolicy::shouldRetire) .collect(Collectors.groupingBy( Node::flavor, Collectors.toSet())) .entrySet().stream() .filter(entry -> { Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue(); long numSpareReadyNodesForFlavor = numSpareNodesByFlavor.get(entry.getKey()); return !limitedPark(nodesThatShouldBeRetiredForFlavor, numSpareReadyNodesForFlavor); }).count(); return numFlavorsWithUnsuccessfullyRetiredNodes == 0; } } /** * @param nodesToPark Nodes that we want to park * @param limit Maximum number of nodes we want to park * @return True iff we were able to park all the nodes */ Map<Flavor, Long> getNumberSpareReadyNodesByFlavor(List<Node> allNodes) { Map<Flavor, Long> numActiveNodesByFlavor = allNodes.stream() .filter(node -> node.state() == Node.State.active) .collect(Collectors.groupingBy(Node::flavor, Collectors.counting())); return allNodes.stream() .filter(node -> node.state() == 
Node.State.ready) .collect(Collectors.groupingBy(Node::flavor, Collectors.counting())) .entrySet().stream() .collect(Collectors.toMap( Map.Entry::getKey, entry -> { long numActiveNodesByCurrentFlavor = numActiveNodesByFlavor.getOrDefault(entry.getKey(), 0L); return getNumSpareNodes(numActiveNodesByCurrentFlavor, entry.getValue()); })); } /** * Returns number of ready nodes to spare (beyond a safety buffer) for a flavor given its number of active * and ready nodes. */ long getNumSpareNodes(long numActiveNodes, long numReadyNodes) { long numNodesToSpare = (long) Math.ceil(0.1 * numActiveNodes); return Math.max(0L, numReadyNodes - numNodesToSpare); } }
This will conflict with my PR (that was merged earlier today). Sorry :)
private SDField validateTargetField(TemporaryImportedField importedField, DocumentReference reference) { String targetFieldName = importedField.targetFieldName(); SDField targetField = reference.targetSearch().getConcreteField(targetFieldName); if (targetField == null) { fail(importedField, targetFieldAsString(targetFieldName, reference) + ": Not found"); } else if (!targetField.doesAttributing()) { fail(importedField, targetFieldAsString(targetFieldName, reference) + ": Is not an attribute"); } else if (targetField.doesIndexing()) { fail(importedField, targetFieldAsString(targetFieldName, reference) + ": Index not allowed"); } return targetField; }
SDField targetField = reference.targetSearch().getConcreteField(targetFieldName);
private SDField validateTargetField(TemporaryImportedField importedField, DocumentReference reference) { String targetFieldName = importedField.targetFieldName(); Search targetSearch = reference.targetSearch(); if (isImportedField(targetSearch, targetFieldName)) { fail(importedField, targetFieldAsString(targetFieldName, reference) + ": Is an imported field. Not supported"); } SDField targetField = targetSearch.getConcreteField(targetFieldName); if (targetField == null) { fail(importedField, targetFieldAsString(targetFieldName, reference) + ": Not found"); } else if (!targetField.doesAttributing()) { fail(importedField, targetFieldAsString(targetFieldName, reference) + ": Is not an attribute field. Only attribute fields supported"); } else if (targetField.doesIndexing()) { fail(importedField, targetFieldAsString(targetFieldName, reference) + ": Is an index field. Not supported"); } else if (targetField.getDataType() instanceof TensorDataType) { fail(importedField, targetFieldAsString(targetFieldName, reference) + ": Is of type 'tensor'. Not supported"); } return targetField; }
class ImportedFieldsResolver extends Processor { private final Map<String, ImportedField> importedFields = new LinkedHashMap<>(); private final Optional<DocumentReferences> references; public ImportedFieldsResolver(Search search, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) { super(search, deployLogger, rankProfileRegistry, queryProfiles); references = search.getDocument().getDocumentReferences(); } @Override public void process() { search.temporaryImportedFields().get().fields().forEach((name, field) -> resolveImportedField(field)); search.setImportedFields(new ImportedFields(importedFields)); } private void resolveImportedField(TemporaryImportedField importedField) { DocumentReference reference = validateDocumentReference(importedField); SDField targetField = validateTargetField(importedField, reference); importedFields.put(importedField.fieldName(), new ImportedField(importedField.fieldName(), reference, targetField)); } private DocumentReference validateDocumentReference(TemporaryImportedField importedField) { String referenceFieldName = importedField.referenceFieldName(); DocumentReference reference = references.get().referenceMap().get(referenceFieldName); if (reference == null) { fail(importedField, "Reference field '" + referenceFieldName + "' not found"); } return reference; } private static String targetFieldAsString(String targetFieldName, DocumentReference reference) { return "Field '" + targetFieldName + "' via reference field '" + reference.referenceField().getName() + "'"; } private void fail(TemporaryImportedField importedField, String msg) { throw new IllegalArgumentException("For search '" + search.getName() + "', import field '" + importedField.fieldName() + "': " + msg); } }
class ImportedFieldsResolver extends Processor { private final Map<String, ImportedField> importedFields = new LinkedHashMap<>(); private final Optional<DocumentReferences> references; public ImportedFieldsResolver(Search search, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) { super(search, deployLogger, rankProfileRegistry, queryProfiles); references = search.getDocument().getDocumentReferences(); } @Override public void process() { search.temporaryImportedFields().get().fields().forEach((name, field) -> resolveImportedField(field)); search.setImportedFields(new ImportedFields(importedFields)); } private void resolveImportedField(TemporaryImportedField importedField) { DocumentReference reference = validateDocumentReference(importedField); SDField targetField = validateTargetField(importedField, reference); importedFields.put(importedField.fieldName(), new ImportedField(importedField.fieldName(), reference, targetField)); } private DocumentReference validateDocumentReference(TemporaryImportedField importedField) { String referenceFieldName = importedField.referenceFieldName(); DocumentReference reference = references.get().referenceMap().get(referenceFieldName); if (reference == null) { fail(importedField, "Reference field '" + referenceFieldName + "' not found"); } return reference; } private static boolean isImportedField(Search targetSearch, String targetFieldName) { return targetSearch.importedFields().isPresent() && targetSearch.importedFields().get().fields().containsKey(targetFieldName); } private static String targetFieldAsString(String targetFieldName, DocumentReference reference) { return "Field '" + targetFieldName + "' via reference field '" + reference.referenceField().getName() + "'"; } private void fail(TemporaryImportedField importedField, String msg) { throw new IllegalArgumentException("For search '" + search.getName() + "', import field '" + importedField.fieldName() + "': " + msg); } }
Let's call Math.round explicitly, to avoid any confusion about whether the result is being floored (truncated) instead of rounded.
/**
 * Creates and starts the docker container for the given node spec: sets up networking
 * (with a manual IPv6 gateway step when needed), mounts the standard host directories,
 * and applies memory/CPU constraints from the spec.
 *
 * @throws RuntimeException wrapping any IOException from container creation
 */
public void startContainer(ContainerName containerName, final ContainerNodeSpec nodeSpec) {
    PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName);
    logger.info("Starting container " + containerName);
    try {
        InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname);
        final boolean isIPv6 = nodeInetAddress instanceof Inet6Address;

        String configServers = String.join(",", environment.getConfigServerHosts());
        Docker.CreateContainerCommand command = docker.createContainerCommand(
                nodeSpec.wantedDockerImage.get(),
                containerName,
                nodeSpec.hostname)
                .withManagedBy(MANAGER_NAME)
                .withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME)
                .withIpAddress(nodeInetAddress)
                .withEnvironment("CONFIG_SERVER_ADDRESS", configServers)
                .withEnvironment("ATHENS_DOMAIN", environment.getAthensDomain())
                .withUlimit("nofile", 262_144, 262_144)
                .withUlimit("nproc", 32_768, 409_600)
                .withUlimit("core", -1, -1)
                .withAddCapability("SYS_PTRACE");

        command.withVolume("/etc/hosts", "/etc/hosts");
        for (String pathInNode : DIRECTORIES_TO_MOUNT.keySet()) {
            String pathInHost = environment.pathInHostFromPathInNode(containerName, pathInNode).toString();
            command.withVolume(pathInHost, pathInNode);
        }

        if (nodeSpec.minMainMemoryAvailableGb.isPresent()) {
            long minMainMemoryAvailableMb = (long) (nodeSpec.minMainMemoryAvailableGb.get() * 1024);
            if (minMainMemoryAvailableMb > 0) {
                command.withMemoryInMb(minMainMemoryAvailableMb);
                command.withEnvironment("TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb));
                command.withEnvironment("VESPA_TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb));
            }
        }

        // BUG FIX: the plain (int) cast truncated toward zero (e.g. 1.75 cores -> 17 shares,
        // silently dropping the fractional share); round to the nearest share count instead.
        nodeSpec.minCpuCores.ifPresent(cpuShares -> command.withCpuShares((int) Math.round(10 * cpuShares)));

        logger.info("Starting new container with args: " + command);
        command.create();

        if (isIPv6) {
            // Extra bridge connection + manual gateway setup needed for IPv6 (see
            // setupContainerNetworkingWithScript).
            docker.connectContainerToNetwork(containerName, "bridge");
            docker.startContainer(containerName);
            setupContainerNetworkingWithScript(containerName);
        } else {
            docker.startContainer(containerName);
        }

        // Directories flagged 'true' must be world-writable inside the container.
        DIRECTORIES_TO_MOUNT.entrySet().stream()
                .filter(Map.Entry::getValue)
                .forEach(entry -> docker.executeInContainerAsRoot(containerName, "chmod", "-R", "a+w", entry.getKey()));
    } catch (IOException e) {
        throw new RuntimeException("Failed to create container " + containerName.asString(), e);
    }

    numberOfRunningContainersGauge.sample(getAllManagedContainers().size());
}
nodeSpec.minCpuCores.ifPresent(cpuShares -> command.withCpuShares((int) (10 * cpuShares)));
/**
 * Creates and starts the docker container for the given node spec: sets up networking
 * (with a manual IPv6 gateway step when needed), mounts the standard host directories,
 * and applies memory/CPU constraints from the spec.
 *
 * @throws RuntimeException wrapping any IOException from container creation
 */
public void startContainer(ContainerName containerName, final ContainerNodeSpec nodeSpec) {
    PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName);
    logger.info("Starting container " + containerName);
    try {
        InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname);
        final boolean isIPv6 = nodeInetAddress instanceof Inet6Address;

        String configServers = String.join(",", environment.getConfigServerHosts());
        Docker.CreateContainerCommand command = docker.createContainerCommand(
                nodeSpec.wantedDockerImage.get(),
                containerName,
                nodeSpec.hostname)
                .withManagedBy(MANAGER_NAME)
                .withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME)
                .withIpAddress(nodeInetAddress)
                .withEnvironment("CONFIG_SERVER_ADDRESS", configServers)
                .withEnvironment("ATHENS_DOMAIN", environment.getAthensDomain())
                .withUlimit("nofile", 262_144, 262_144)
                .withUlimit("nproc", 32_768, 409_600)
                .withUlimit("core", -1, -1)
                .withAddCapability("SYS_PTRACE");

        command.withVolume("/etc/hosts", "/etc/hosts");
        for (String pathInNode : DIRECTORIES_TO_MOUNT.keySet()) {
            String pathInHost = environment.pathInHostFromPathInNode(containerName, pathInNode).toString();
            command.withVolume(pathInHost, pathInNode);
        }

        if (nodeSpec.minMainMemoryAvailableGb.isPresent()) {
            long minMainMemoryAvailableMb = (long) (nodeSpec.minMainMemoryAvailableGb.get() * 1024);
            if (minMainMemoryAvailableMb > 0) {
                command.withMemoryInMb(minMainMemoryAvailableMb);
                command.withEnvironment("TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb));
                command.withEnvironment("VESPA_TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb));
            }
        }

        // Docker CPU shares are a relative weight; cores are scaled by 10 and rounded
        // (Math.round, not truncation) to the nearest share count.
        nodeSpec.minCpuCores.ifPresent(cpuShares -> command.withCpuShares((int) Math.round(10 * cpuShares)));

        logger.info("Starting new container with args: " + command);
        command.create();

        if (isIPv6) {
            // Extra bridge connection + manual gateway setup needed for IPv6.
            docker.connectContainerToNetwork(containerName, "bridge");
            docker.startContainer(containerName);
            setupContainerNetworkingWithScript(containerName);
        }
        else {
            docker.startContainer(containerName);
        }

        // Directories flagged 'true' must be world-writable inside the container.
        DIRECTORIES_TO_MOUNT.entrySet().stream().filter(Map.Entry::getValue).forEach(entry ->
                docker.executeInContainerAsRoot(containerName, "chmod", "-R", "a+w", entry.getKey()));
    } catch (IOException e) {
        throw new RuntimeException("Failed to create container " + containerName.asString(), e);
    }

    numberOfRunningContainersGauge.sample(getAllManagedContainers().size());
}
class DockerOperationsImpl implements DockerOperations { public static final String NODE_PROGRAM = Defaults.getDefaults().underVespaHome("bin/vespa-nodectl"); private static final String[] GET_VESPA_VERSION_COMMAND = new String[]{NODE_PROGRAM, "vespa-version"}; private static final String[] RESUME_NODE_COMMAND = new String[] {NODE_PROGRAM, "resume"}; private static final String[] SUSPEND_NODE_COMMAND = new String[] {NODE_PROGRAM, "suspend"}; private static final String[] RESTART_VESPA_ON_NODE_COMMAND = new String[] {NODE_PROGRAM, "restart-vespa"}; private static final String[] STOP_NODE_COMMAND = new String[] {NODE_PROGRAM, "stop"}; private static final Pattern VESPA_VERSION_PATTERN = Pattern.compile("^(\\S*)$", Pattern.MULTILINE); private static final String MANAGER_NAME = "node-admin"; private static final Map<String, Boolean> DIRECTORIES_TO_MOUNT = new HashMap<>(); static { DIRECTORIES_TO_MOUNT.put("/etc/yamas-agent", true); DIRECTORIES_TO_MOUNT.put("/etc/filebeat", true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/daemontools_y"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/langdetect/"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yca"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yck"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yell"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykey"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykeyd"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yms_agent"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ysar"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ystatus"), false); 
DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/zpe_policy_updater"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/cache"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/crash"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/jdisc"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_container"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/maven"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/run"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/scoreboards"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/service"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/share"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/spool"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/yca"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/ycore++"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zookeeper"), false); } private final Docker docker; private final Environment environment; private final Consumer<List<String>> commandExecutor; private GaugeWrapper numberOfRunningContainersGauge; public DockerOperationsImpl(Docker docker, Environment environment, MetricReceiverWrapper metricReceiver) { this(docker, environment, metricReceiver, DockerOperationsImpl::runCommand); } DockerOperationsImpl(Docker docker, Environment environment, MetricReceiverWrapper metricReceiver, Consumer<List<String>> commandExecutor) { this.docker = docker; this.environment = environment; setMetrics(metricReceiver); this.commandExecutor = commandExecutor; } @Override public Optional<String> 
getVespaVersion(ContainerName containerName) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); ProcessResult result = docker.executeInContainer(containerName, DockerOperationsImpl.GET_VESPA_VERSION_COMMAND); if (!result.isSuccess()) { logger.warning("Container " + containerName.asString() + ": Command " + Arrays.toString(DockerOperationsImpl.GET_VESPA_VERSION_COMMAND) + " failed: " + result); return Optional.empty(); } Optional<String> vespaVersion = parseVespaVersion(result.getOutput()); if (vespaVersion.isPresent()) { return vespaVersion; } else { logger.warning("Container " + containerName.asString() + ": Failed to parse vespa version from " + result.getOutput()); return Optional.empty(); } } static Optional<String> parseVespaVersion(final String rawVespaVersion) { if (rawVespaVersion == null) return Optional.empty(); final Matcher matcher = VESPA_VERSION_PATTERN.matcher(rawVespaVersion.trim()); return matcher.find() ? Optional.of(matcher.group(1)) : Optional.empty(); } @Override @Override public void removeContainer(final Container existingContainer) { final ContainerName containerName = existingContainer.name; PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); if (existingContainer.state.isRunning()) { logger.info("Stopping container " + containerName.asString()); docker.stopContainer(containerName); } logger.info("Deleting container " + containerName.asString()); docker.deleteContainer(containerName); numberOfRunningContainersGauge.sample(getAllManagedContainers().size()); } @Override public boolean shouldScheduleDownloadOfImage(final DockerImage dockerImage) { return !docker.imageIsDownloaded(dockerImage); } @Override public Optional<Container> getContainer(ContainerName containerName) { return docker.getContainer(containerName); } /** * Try to suspend node. 
Suspending a node means the node should be taken offline, * such that maintenance can be done of the node (upgrading, rebooting, etc), * and such that we will start serving again as soon as possible afterwards. * * Any failures are logged and ignored. */ @Override public void trySuspendNode(ContainerName containerName) { try { executeCommandInContainer(containerName, SUSPEND_NODE_COMMAND); } catch (RuntimeException e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.warning("Failed trying to suspend container " + containerName.asString() + " with " + Arrays.toString(SUSPEND_NODE_COMMAND), e); } } /** * Due to a bug in docker (https: * IPv6 gateway in containers connected to more than one docker network */ private void setupContainerNetworkingWithScript(ContainerName containerName) throws IOException { InetAddress hostDefaultGateway = DockerNetworkCreator.getDefaultGatewayLinux(true); executeCommandInNetworkNamespace(containerName, new String[]{ "route", "-A", "inet6", "add", "default", "gw", hostDefaultGateway.getHostAddress(), "dev", "eth1"}); } @Override public void scheduleDownloadOfImage(ContainerName containerName, final ContainerNodeSpec nodeSpec, Runnable callback) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.info("Schedule async download of " + nodeSpec.wantedDockerImage.get()); final CompletableFuture<DockerImage> asyncPullResult = docker.pullImageAsync(nodeSpec.wantedDockerImage.get()); asyncPullResult.whenComplete((dockerImage, throwable) -> { if (throwable != null) { logger.warning("Failed to pull " + nodeSpec.wantedDockerImage, throwable); return; } assert nodeSpec.wantedDockerImage.get().equals(dockerImage); callback.run(); }); } ProcessResult executeCommandInContainer(ContainerName containerName, String[] command) { ProcessResult result = docker.executeInContainerAsRoot(containerName, command); if (! 
result.isSuccess()) { throw new RuntimeException("Container " + containerName.asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, String[] command) { return docker.executeInContainerAsRoot(containerName, command); } @Override public void executeCommandInNetworkNamespace(ContainerName containerName, String[] command) { final PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); final Integer containerPid = docker.getContainer(containerName) .filter(container -> container.state.isRunning()) .map(container -> container.pid) .orElseThrow(() -> new RuntimeException("PID not found for container with name: " + containerName.asString())); final List<String> wrappedCommand = new LinkedList<>(); wrappedCommand.add("sudo"); wrappedCommand.add("-n"); wrappedCommand.add("nsenter"); wrappedCommand.add(String.format("--net=/host/proc/%d/ns/net", containerPid)); wrappedCommand.add("--"); wrappedCommand.addAll(Arrays.asList(command)); try { commandExecutor.accept(wrappedCommand); } catch (Exception e) { logger.error(String.format("Failed to execute %s in network namespace for %s (PID = %d)", Arrays.toString(command), containerName.asString(), containerPid)); throw new RuntimeException(e); } } @Override public void resumeNode(ContainerName containerName) { executeCommandInContainer(containerName, RESUME_NODE_COMMAND); } @Override public void restartVespaOnNode(ContainerName containerName) { executeCommandInContainer(containerName, RESTART_VESPA_ON_NODE_COMMAND); } @Override public void stopServicesOnNode(ContainerName containerName) { executeCommandInContainer(containerName, STOP_NODE_COMMAND); } @Override public Optional<Docker.ContainerStats> getContainerStats(ContainerName containerName) { return docker.getContainerStats(containerName); } @Override public List<Container> getAllManagedContainers() { 
return docker.getAllContainersManagedBy(MANAGER_NAME); } @Override public void deleteUnusedDockerImages() { docker.deleteUnusedDockerImages(); } private void setMetrics(MetricReceiverWrapper metricReceiver) { Dimensions dimensions = new Dimensions.Builder() .add("host", HostName.getLocalhost()) .add("role", "docker").build(); numberOfRunningContainersGauge = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "containers.running"); numberOfRunningContainersGauge.sample(getAllManagedContainers().size()); } private static void runCommand(List<String> command) { try { final Process process = new ProcessBuilder(command) .redirectErrorStream(true) .start(); final String output = CharStreams.toString(new InputStreamReader(process.getInputStream())); final int resultCode = process.waitFor(); if (resultCode != 0) { throw new RuntimeException("Command " + Joiner.on(' ').join(command) + " failed: " + output); } } catch (IOException|InterruptedException e) { throw new RuntimeException(e); } } }
class DockerOperationsImpl implements DockerOperations { public static final String NODE_PROGRAM = Defaults.getDefaults().underVespaHome("bin/vespa-nodectl"); private static final String[] GET_VESPA_VERSION_COMMAND = new String[]{NODE_PROGRAM, "vespa-version"}; private static final String[] RESUME_NODE_COMMAND = new String[] {NODE_PROGRAM, "resume"}; private static final String[] SUSPEND_NODE_COMMAND = new String[] {NODE_PROGRAM, "suspend"}; private static final String[] RESTART_VESPA_ON_NODE_COMMAND = new String[] {NODE_PROGRAM, "restart-vespa"}; private static final String[] STOP_NODE_COMMAND = new String[] {NODE_PROGRAM, "stop"}; private static final Pattern VESPA_VERSION_PATTERN = Pattern.compile("^(\\S*)$", Pattern.MULTILINE); private static final String MANAGER_NAME = "node-admin"; private static final Map<String, Boolean> DIRECTORIES_TO_MOUNT = new HashMap<>(); static { DIRECTORIES_TO_MOUNT.put("/etc/yamas-agent", true); DIRECTORIES_TO_MOUNT.put("/etc/filebeat", true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/daemontools_y"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/langdetect/"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yca"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yck"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yell"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykey"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykeyd"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yms_agent"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ysar"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ystatus"), false); 
DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/zpe_policy_updater"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/cache"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/crash"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/jdisc"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_container"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/maven"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/run"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/scoreboards"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/service"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/share"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/spool"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/yca"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/ycore++"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zookeeper"), false); } private final Docker docker; private final Environment environment; private final Consumer<List<String>> commandExecutor; private GaugeWrapper numberOfRunningContainersGauge; public DockerOperationsImpl(Docker docker, Environment environment, MetricReceiverWrapper metricReceiver) { this(docker, environment, metricReceiver, DockerOperationsImpl::runCommand); } DockerOperationsImpl(Docker docker, Environment environment, MetricReceiverWrapper metricReceiver, Consumer<List<String>> commandExecutor) { this.docker = docker; this.environment = environment; setMetrics(metricReceiver); this.commandExecutor = commandExecutor; } @Override public Optional<String> 
getVespaVersion(ContainerName containerName) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); ProcessResult result = docker.executeInContainer(containerName, DockerOperationsImpl.GET_VESPA_VERSION_COMMAND); if (!result.isSuccess()) { logger.warning("Container " + containerName.asString() + ": Command " + Arrays.toString(DockerOperationsImpl.GET_VESPA_VERSION_COMMAND) + " failed: " + result); return Optional.empty(); } Optional<String> vespaVersion = parseVespaVersion(result.getOutput()); if (vespaVersion.isPresent()) { return vespaVersion; } else { logger.warning("Container " + containerName.asString() + ": Failed to parse vespa version from " + result.getOutput()); return Optional.empty(); } } static Optional<String> parseVespaVersion(final String rawVespaVersion) { if (rawVespaVersion == null) return Optional.empty(); final Matcher matcher = VESPA_VERSION_PATTERN.matcher(rawVespaVersion.trim()); return matcher.find() ? Optional.of(matcher.group(1)) : Optional.empty(); } @Override @Override public void removeContainer(final Container existingContainer) { final ContainerName containerName = existingContainer.name; PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); if (existingContainer.state.isRunning()) { logger.info("Stopping container " + containerName.asString()); docker.stopContainer(containerName); } logger.info("Deleting container " + containerName.asString()); docker.deleteContainer(containerName); numberOfRunningContainersGauge.sample(getAllManagedContainers().size()); } @Override public boolean shouldScheduleDownloadOfImage(final DockerImage dockerImage) { return !docker.imageIsDownloaded(dockerImage); } @Override public Optional<Container> getContainer(ContainerName containerName) { return docker.getContainer(containerName); } /** * Try to suspend node. 
Suspending a node means the node should be taken offline, * such that maintenance can be done of the node (upgrading, rebooting, etc), * and such that we will start serving again as soon as possible afterwards. * * Any failures are logged and ignored. */ @Override public void trySuspendNode(ContainerName containerName) { try { executeCommandInContainer(containerName, SUSPEND_NODE_COMMAND); } catch (RuntimeException e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.warning("Failed trying to suspend container " + containerName.asString() + " with " + Arrays.toString(SUSPEND_NODE_COMMAND), e); } } /** * Due to a bug in docker (https: * IPv6 gateway in containers connected to more than one docker network */ private void setupContainerNetworkingWithScript(ContainerName containerName) throws IOException { InetAddress hostDefaultGateway = DockerNetworkCreator.getDefaultGatewayLinux(true); executeCommandInNetworkNamespace(containerName, new String[]{ "route", "-A", "inet6", "add", "default", "gw", hostDefaultGateway.getHostAddress(), "dev", "eth1"}); } @Override public void scheduleDownloadOfImage(ContainerName containerName, final ContainerNodeSpec nodeSpec, Runnable callback) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.info("Schedule async download of " + nodeSpec.wantedDockerImage.get()); final CompletableFuture<DockerImage> asyncPullResult = docker.pullImageAsync(nodeSpec.wantedDockerImage.get()); asyncPullResult.whenComplete((dockerImage, throwable) -> { if (throwable != null) { logger.warning("Failed to pull " + nodeSpec.wantedDockerImage, throwable); return; } assert nodeSpec.wantedDockerImage.get().equals(dockerImage); callback.run(); }); } ProcessResult executeCommandInContainer(ContainerName containerName, String[] command) { ProcessResult result = docker.executeInContainerAsRoot(containerName, command); if (! 
result.isSuccess()) { throw new RuntimeException("Container " + containerName.asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, String[] command) { return docker.executeInContainerAsRoot(containerName, command); } @Override public void executeCommandInNetworkNamespace(ContainerName containerName, String[] command) { final PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); final Integer containerPid = docker.getContainer(containerName) .filter(container -> container.state.isRunning()) .map(container -> container.pid) .orElseThrow(() -> new RuntimeException("PID not found for container with name: " + containerName.asString())); final List<String> wrappedCommand = new LinkedList<>(); wrappedCommand.add("sudo"); wrappedCommand.add("-n"); wrappedCommand.add("nsenter"); wrappedCommand.add(String.format("--net=/host/proc/%d/ns/net", containerPid)); wrappedCommand.add("--"); wrappedCommand.addAll(Arrays.asList(command)); try { commandExecutor.accept(wrappedCommand); } catch (Exception e) { logger.error(String.format("Failed to execute %s in network namespace for %s (PID = %d)", Arrays.toString(command), containerName.asString(), containerPid)); throw new RuntimeException(e); } } @Override public void resumeNode(ContainerName containerName) { executeCommandInContainer(containerName, RESUME_NODE_COMMAND); } @Override public void restartVespaOnNode(ContainerName containerName) { executeCommandInContainer(containerName, RESTART_VESPA_ON_NODE_COMMAND); } @Override public void stopServicesOnNode(ContainerName containerName) { executeCommandInContainer(containerName, STOP_NODE_COMMAND); } @Override public Optional<Docker.ContainerStats> getContainerStats(ContainerName containerName) { return docker.getContainerStats(containerName); } @Override public List<Container> getAllManagedContainers() { 
return docker.getAllContainersManagedBy(MANAGER_NAME); } @Override public void deleteUnusedDockerImages() { docker.deleteUnusedDockerImages(); } private void setMetrics(MetricReceiverWrapper metricReceiver) { Dimensions dimensions = new Dimensions.Builder() .add("host", HostName.getLocalhost()) .add("role", "docker").build(); numberOfRunningContainersGauge = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "containers.running"); numberOfRunningContainersGauge.sample(getAllManagedContainers().size()); } private static void runCommand(List<String> command) { try { final Process process = new ProcessBuilder(command) .redirectErrorStream(true) .start(); final String output = CharStreams.toString(new InputStreamReader(process.getInputStream())); final int resultCode = process.waitFor(); if (resultCode != 0) { throw new RuntimeException("Command " + Joiner.on(' ').join(command) + " failed: " + output); } } catch (IOException|InterruptedException e) { throw new RuntimeException(e); } } }
Fixed.
/**
 * Creates and starts the Docker container described by {@code nodeSpec},
 * then makes the writable mount points world-writable inside the container.
 *
 * @param containerName name of the container to create
 * @param nodeSpec      node specification (image, memory, CPU, hostname)
 * @throws RuntimeException if container creation fails
 */
public void startContainer(ContainerName containerName, final ContainerNodeSpec nodeSpec) {
    PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName);
    logger.info("Starting container " + containerName);
    try {
        InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname);
        final boolean isIPv6 = nodeInetAddress instanceof Inet6Address;

        String configServers = String.join(",", environment.getConfigServerHosts());
        Docker.CreateContainerCommand command = docker.createContainerCommand(
                nodeSpec.wantedDockerImage.get(),
                containerName,
                nodeSpec.hostname)
                .withManagedBy(MANAGER_NAME)
                .withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME)
                .withIpAddress(nodeInetAddress)
                .withEnvironment("CONFIG_SERVER_ADDRESS", configServers)
                .withEnvironment("ATHENS_DOMAIN", environment.getAthensDomain())
                .withUlimit("nofile", 262_144, 262_144)
                .withUlimit("nproc", 32_768, 409_600)
                .withUlimit("core", -1, -1) // unlimited core dump size
                .withAddCapability("SYS_PTRACE");

        command.withVolume("/etc/hosts", "/etc/hosts");
        // Mount every configured node directory from its host-side location.
        for (String pathInNode : DIRECTORIES_TO_MOUNT.keySet()) {
            String pathInHost = environment.pathInHostFromPathInNode(containerName, pathInNode).toString();
            command.withVolume(pathInHost, pathInNode);
        }

        // Apply memory limit plus matching environment variables for the node.
        if (nodeSpec.minMainMemoryAvailableGb.isPresent()) {
            long minMainMemoryAvailableMb = (long) (nodeSpec.minMainMemoryAvailableGb.get() * 1024);
            if (minMainMemoryAvailableMb > 0) {
                command.withMemoryInMb(minMainMemoryAvailableMb);
                command.withEnvironment("TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb));
                command.withEnvironment("VESPA_TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb));
            }
        }

        // FIX: use Math.round instead of a plain (int) cast. The cast truncated
        // fractional results (e.g. 1.25 cores -> 12.5 -> 12 shares); rounding
        // yields the nearest share count (10 shares per core).
        nodeSpec.minCpuCores.ifPresent(cpuShares -> command.withCpuShares((int) Math.round(10 * cpuShares)));

        logger.info("Starting new container with args: " + command);
        command.create();

        if (isIPv6) {
            // IPv6 nodes additionally join the bridge network and get an
            // explicit default-gateway fix-up after start.
            docker.connectContainerToNetwork(containerName, "bridge");
            docker.startContainer(containerName);
            setupContainerNetworkingWithScript(containerName);
        } else {
            docker.startContainer(containerName);
        }

        // Only directories flagged 'true' are made world-writable.
        DIRECTORIES_TO_MOUNT.entrySet().stream().filter(Map.Entry::getValue).forEach(entry ->
                docker.executeInContainerAsRoot(containerName, "chmod", "-R", "a+w", entry.getKey()));
    } catch (IOException e) {
        throw new RuntimeException("Failed to create container " + containerName.asString(), e);
    }

    numberOfRunningContainersGauge.sample(getAllManagedContainers().size());
}
nodeSpec.minCpuCores.ifPresent(cpuShares -> command.withCpuShares((int) (10 * cpuShares)));
/**
 * Creates and starts the Docker container described by {@code nodeSpec},
 * then makes the writable mount points world-writable inside the container.
 * NOTE(review): assumes nodeSpec.wantedDockerImage is present — confirm callers.
 *
 * @param containerName name of the container to create
 * @param nodeSpec      node specification (image, memory, CPU, hostname)
 * @throws RuntimeException if container creation fails
 */
public void startContainer(ContainerName containerName, final ContainerNodeSpec nodeSpec) {
    PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName);
    logger.info("Starting container " + containerName);
    try {
        InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname);
        final boolean isIPv6 = nodeInetAddress instanceof Inet6Address;

        String configServers = String.join(",", environment.getConfigServerHosts());
        Docker.CreateContainerCommand command = docker.createContainerCommand(
                nodeSpec.wantedDockerImage.get(),
                containerName,
                nodeSpec.hostname)
                .withManagedBy(MANAGER_NAME)
                .withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME)
                .withIpAddress(nodeInetAddress)
                .withEnvironment("CONFIG_SERVER_ADDRESS", configServers)
                .withEnvironment("ATHENS_DOMAIN", environment.getAthensDomain())
                .withUlimit("nofile", 262_144, 262_144)
                .withUlimit("nproc", 32_768, 409_600)
                .withUlimit("core", -1, -1) // unlimited core dump size
                .withAddCapability("SYS_PTRACE");

        command.withVolume("/etc/hosts", "/etc/hosts");
        // Mount every configured node directory from its host-side location.
        for (String pathInNode : DIRECTORIES_TO_MOUNT.keySet()) {
            String pathInHost = environment.pathInHostFromPathInNode(containerName, pathInNode).toString();
            command.withVolume(pathInHost, pathInNode);
        }

        // Memory limit plus matching environment variables for the node.
        if (nodeSpec.minMainMemoryAvailableGb.isPresent()) {
            long minMainMemoryAvailableMb = (long) (nodeSpec.minMainMemoryAvailableGb.get() * 1024);
            if (minMainMemoryAvailableMb > 0) {
                command.withMemoryInMb(minMainMemoryAvailableMb);
                command.withEnvironment("TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb));
                command.withEnvironment("VESPA_TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb));
            }
        }

        // 10 CPU shares per core, rounded to the nearest integer.
        nodeSpec.minCpuCores.ifPresent(cpuShares -> command.withCpuShares((int) Math.round(10 * cpuShares)));

        logger.info("Starting new container with args: " + command);
        command.create();

        if (isIPv6) {
            // IPv6 nodes additionally join the bridge network and get an
            // explicit routing fix-up script run after start.
            docker.connectContainerToNetwork(containerName, "bridge");
            docker.startContainer(containerName);
            setupContainerNetworkingWithScript(containerName);
        } else {
            docker.startContainer(containerName);
        }

        // Only directories flagged 'true' in DIRECTORIES_TO_MOUNT are made
        // world-writable inside the container.
        DIRECTORIES_TO_MOUNT.entrySet().stream().filter(Map.Entry::getValue).forEach(entry ->
                docker.executeInContainerAsRoot(containerName, "chmod", "-R", "a+w", entry.getKey()));
    } catch (IOException e) {
        throw new RuntimeException("Failed to create container " + containerName.asString(), e);
    }

    numberOfRunningContainersGauge.sample(getAllManagedContainers().size());
}
class DockerOperationsImpl implements DockerOperations { public static final String NODE_PROGRAM = Defaults.getDefaults().underVespaHome("bin/vespa-nodectl"); private static final String[] GET_VESPA_VERSION_COMMAND = new String[]{NODE_PROGRAM, "vespa-version"}; private static final String[] RESUME_NODE_COMMAND = new String[] {NODE_PROGRAM, "resume"}; private static final String[] SUSPEND_NODE_COMMAND = new String[] {NODE_PROGRAM, "suspend"}; private static final String[] RESTART_VESPA_ON_NODE_COMMAND = new String[] {NODE_PROGRAM, "restart-vespa"}; private static final String[] STOP_NODE_COMMAND = new String[] {NODE_PROGRAM, "stop"}; private static final Pattern VESPA_VERSION_PATTERN = Pattern.compile("^(\\S*)$", Pattern.MULTILINE); private static final String MANAGER_NAME = "node-admin"; private static final Map<String, Boolean> DIRECTORIES_TO_MOUNT = new HashMap<>(); static { DIRECTORIES_TO_MOUNT.put("/etc/yamas-agent", true); DIRECTORIES_TO_MOUNT.put("/etc/filebeat", true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/daemontools_y"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/langdetect/"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yca"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yck"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yell"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykey"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykeyd"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yms_agent"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ysar"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ystatus"), false); 
DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/zpe_policy_updater"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/cache"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/crash"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/jdisc"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_container"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/maven"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/run"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/scoreboards"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/service"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/share"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/spool"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/yca"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/ycore++"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zookeeper"), false); } private final Docker docker; private final Environment environment; private final Consumer<List<String>> commandExecutor; private GaugeWrapper numberOfRunningContainersGauge; public DockerOperationsImpl(Docker docker, Environment environment, MetricReceiverWrapper metricReceiver) { this(docker, environment, metricReceiver, DockerOperationsImpl::runCommand); } DockerOperationsImpl(Docker docker, Environment environment, MetricReceiverWrapper metricReceiver, Consumer<List<String>> commandExecutor) { this.docker = docker; this.environment = environment; setMetrics(metricReceiver); this.commandExecutor = commandExecutor; } @Override public Optional<String> 
getVespaVersion(ContainerName containerName) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); ProcessResult result = docker.executeInContainer(containerName, DockerOperationsImpl.GET_VESPA_VERSION_COMMAND); if (!result.isSuccess()) { logger.warning("Container " + containerName.asString() + ": Command " + Arrays.toString(DockerOperationsImpl.GET_VESPA_VERSION_COMMAND) + " failed: " + result); return Optional.empty(); } Optional<String> vespaVersion = parseVespaVersion(result.getOutput()); if (vespaVersion.isPresent()) { return vespaVersion; } else { logger.warning("Container " + containerName.asString() + ": Failed to parse vespa version from " + result.getOutput()); return Optional.empty(); } } static Optional<String> parseVespaVersion(final String rawVespaVersion) { if (rawVespaVersion == null) return Optional.empty(); final Matcher matcher = VESPA_VERSION_PATTERN.matcher(rawVespaVersion.trim()); return matcher.find() ? Optional.of(matcher.group(1)) : Optional.empty(); } @Override @Override public void removeContainer(final Container existingContainer) { final ContainerName containerName = existingContainer.name; PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); if (existingContainer.state.isRunning()) { logger.info("Stopping container " + containerName.asString()); docker.stopContainer(containerName); } logger.info("Deleting container " + containerName.asString()); docker.deleteContainer(containerName); numberOfRunningContainersGauge.sample(getAllManagedContainers().size()); } @Override public boolean shouldScheduleDownloadOfImage(final DockerImage dockerImage) { return !docker.imageIsDownloaded(dockerImage); } @Override public Optional<Container> getContainer(ContainerName containerName) { return docker.getContainer(containerName); } /** * Try to suspend node. 
Suspending a node means the node should be taken offline, * such that maintenance can be done of the node (upgrading, rebooting, etc), * and such that we will start serving again as soon as possible afterwards. * * Any failures are logged and ignored. */ @Override public void trySuspendNode(ContainerName containerName) { try { executeCommandInContainer(containerName, SUSPEND_NODE_COMMAND); } catch (RuntimeException e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.warning("Failed trying to suspend container " + containerName.asString() + " with " + Arrays.toString(SUSPEND_NODE_COMMAND), e); } } /** * Due to a bug in docker (https: * IPv6 gateway in containers connected to more than one docker network */ private void setupContainerNetworkingWithScript(ContainerName containerName) throws IOException { InetAddress hostDefaultGateway = DockerNetworkCreator.getDefaultGatewayLinux(true); executeCommandInNetworkNamespace(containerName, new String[]{ "route", "-A", "inet6", "add", "default", "gw", hostDefaultGateway.getHostAddress(), "dev", "eth1"}); } @Override public void scheduleDownloadOfImage(ContainerName containerName, final ContainerNodeSpec nodeSpec, Runnable callback) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.info("Schedule async download of " + nodeSpec.wantedDockerImage.get()); final CompletableFuture<DockerImage> asyncPullResult = docker.pullImageAsync(nodeSpec.wantedDockerImage.get()); asyncPullResult.whenComplete((dockerImage, throwable) -> { if (throwable != null) { logger.warning("Failed to pull " + nodeSpec.wantedDockerImage, throwable); return; } assert nodeSpec.wantedDockerImage.get().equals(dockerImage); callback.run(); }); } ProcessResult executeCommandInContainer(ContainerName containerName, String[] command) { ProcessResult result = docker.executeInContainerAsRoot(containerName, command); if (! 
result.isSuccess()) { throw new RuntimeException("Container " + containerName.asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, String[] command) { return docker.executeInContainerAsRoot(containerName, command); } @Override public void executeCommandInNetworkNamespace(ContainerName containerName, String[] command) { final PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); final Integer containerPid = docker.getContainer(containerName) .filter(container -> container.state.isRunning()) .map(container -> container.pid) .orElseThrow(() -> new RuntimeException("PID not found for container with name: " + containerName.asString())); final List<String> wrappedCommand = new LinkedList<>(); wrappedCommand.add("sudo"); wrappedCommand.add("-n"); wrappedCommand.add("nsenter"); wrappedCommand.add(String.format("--net=/host/proc/%d/ns/net", containerPid)); wrappedCommand.add("--"); wrappedCommand.addAll(Arrays.asList(command)); try { commandExecutor.accept(wrappedCommand); } catch (Exception e) { logger.error(String.format("Failed to execute %s in network namespace for %s (PID = %d)", Arrays.toString(command), containerName.asString(), containerPid)); throw new RuntimeException(e); } } @Override public void resumeNode(ContainerName containerName) { executeCommandInContainer(containerName, RESUME_NODE_COMMAND); } @Override public void restartVespaOnNode(ContainerName containerName) { executeCommandInContainer(containerName, RESTART_VESPA_ON_NODE_COMMAND); } @Override public void stopServicesOnNode(ContainerName containerName) { executeCommandInContainer(containerName, STOP_NODE_COMMAND); } @Override public Optional<Docker.ContainerStats> getContainerStats(ContainerName containerName) { return docker.getContainerStats(containerName); } @Override public List<Container> getAllManagedContainers() { 
return docker.getAllContainersManagedBy(MANAGER_NAME); } @Override public void deleteUnusedDockerImages() { docker.deleteUnusedDockerImages(); } private void setMetrics(MetricReceiverWrapper metricReceiver) { Dimensions dimensions = new Dimensions.Builder() .add("host", HostName.getLocalhost()) .add("role", "docker").build(); numberOfRunningContainersGauge = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "containers.running"); numberOfRunningContainersGauge.sample(getAllManagedContainers().size()); } private static void runCommand(List<String> command) { try { final Process process = new ProcessBuilder(command) .redirectErrorStream(true) .start(); final String output = CharStreams.toString(new InputStreamReader(process.getInputStream())); final int resultCode = process.waitFor(); if (resultCode != 0) { throw new RuntimeException("Command " + Joiner.on(' ').join(command) + " failed: " + output); } } catch (IOException|InterruptedException e) { throw new RuntimeException(e); } } }
class DockerOperationsImpl implements DockerOperations { public static final String NODE_PROGRAM = Defaults.getDefaults().underVespaHome("bin/vespa-nodectl"); private static final String[] GET_VESPA_VERSION_COMMAND = new String[]{NODE_PROGRAM, "vespa-version"}; private static final String[] RESUME_NODE_COMMAND = new String[] {NODE_PROGRAM, "resume"}; private static final String[] SUSPEND_NODE_COMMAND = new String[] {NODE_PROGRAM, "suspend"}; private static final String[] RESTART_VESPA_ON_NODE_COMMAND = new String[] {NODE_PROGRAM, "restart-vespa"}; private static final String[] STOP_NODE_COMMAND = new String[] {NODE_PROGRAM, "stop"}; private static final Pattern VESPA_VERSION_PATTERN = Pattern.compile("^(\\S*)$", Pattern.MULTILINE); private static final String MANAGER_NAME = "node-admin"; private static final Map<String, Boolean> DIRECTORIES_TO_MOUNT = new HashMap<>(); static { DIRECTORIES_TO_MOUNT.put("/etc/yamas-agent", true); DIRECTORIES_TO_MOUNT.put("/etc/filebeat", true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/daemontools_y"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/langdetect/"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yca"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yck"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yell"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykey"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykeyd"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yms_agent"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ysar"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ystatus"), false); 
DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/zpe_policy_updater"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/cache"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/crash"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/jdisc"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_container"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/maven"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/run"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/scoreboards"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/service"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/share"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/spool"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/yca"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/ycore++"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zookeeper"), false); } private final Docker docker; private final Environment environment; private final Consumer<List<String>> commandExecutor; private GaugeWrapper numberOfRunningContainersGauge; public DockerOperationsImpl(Docker docker, Environment environment, MetricReceiverWrapper metricReceiver) { this(docker, environment, metricReceiver, DockerOperationsImpl::runCommand); } DockerOperationsImpl(Docker docker, Environment environment, MetricReceiverWrapper metricReceiver, Consumer<List<String>> commandExecutor) { this.docker = docker; this.environment = environment; setMetrics(metricReceiver); this.commandExecutor = commandExecutor; } @Override public Optional<String> 
getVespaVersion(ContainerName containerName) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); ProcessResult result = docker.executeInContainer(containerName, DockerOperationsImpl.GET_VESPA_VERSION_COMMAND); if (!result.isSuccess()) { logger.warning("Container " + containerName.asString() + ": Command " + Arrays.toString(DockerOperationsImpl.GET_VESPA_VERSION_COMMAND) + " failed: " + result); return Optional.empty(); } Optional<String> vespaVersion = parseVespaVersion(result.getOutput()); if (vespaVersion.isPresent()) { return vespaVersion; } else { logger.warning("Container " + containerName.asString() + ": Failed to parse vespa version from " + result.getOutput()); return Optional.empty(); } } static Optional<String> parseVespaVersion(final String rawVespaVersion) { if (rawVespaVersion == null) return Optional.empty(); final Matcher matcher = VESPA_VERSION_PATTERN.matcher(rawVespaVersion.trim()); return matcher.find() ? Optional.of(matcher.group(1)) : Optional.empty(); } @Override @Override public void removeContainer(final Container existingContainer) { final ContainerName containerName = existingContainer.name; PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); if (existingContainer.state.isRunning()) { logger.info("Stopping container " + containerName.asString()); docker.stopContainer(containerName); } logger.info("Deleting container " + containerName.asString()); docker.deleteContainer(containerName); numberOfRunningContainersGauge.sample(getAllManagedContainers().size()); } @Override public boolean shouldScheduleDownloadOfImage(final DockerImage dockerImage) { return !docker.imageIsDownloaded(dockerImage); } @Override public Optional<Container> getContainer(ContainerName containerName) { return docker.getContainer(containerName); } /** * Try to suspend node. 
Suspending a node means the node should be taken offline, * such that maintenance can be done of the node (upgrading, rebooting, etc), * and such that we will start serving again as soon as possible afterwards. * * Any failures are logged and ignored. */ @Override public void trySuspendNode(ContainerName containerName) { try { executeCommandInContainer(containerName, SUSPEND_NODE_COMMAND); } catch (RuntimeException e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.warning("Failed trying to suspend container " + containerName.asString() + " with " + Arrays.toString(SUSPEND_NODE_COMMAND), e); } } /** * Due to a bug in docker (https: * IPv6 gateway in containers connected to more than one docker network */ private void setupContainerNetworkingWithScript(ContainerName containerName) throws IOException { InetAddress hostDefaultGateway = DockerNetworkCreator.getDefaultGatewayLinux(true); executeCommandInNetworkNamespace(containerName, new String[]{ "route", "-A", "inet6", "add", "default", "gw", hostDefaultGateway.getHostAddress(), "dev", "eth1"}); } @Override public void scheduleDownloadOfImage(ContainerName containerName, final ContainerNodeSpec nodeSpec, Runnable callback) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.info("Schedule async download of " + nodeSpec.wantedDockerImage.get()); final CompletableFuture<DockerImage> asyncPullResult = docker.pullImageAsync(nodeSpec.wantedDockerImage.get()); asyncPullResult.whenComplete((dockerImage, throwable) -> { if (throwable != null) { logger.warning("Failed to pull " + nodeSpec.wantedDockerImage, throwable); return; } assert nodeSpec.wantedDockerImage.get().equals(dockerImage); callback.run(); }); } ProcessResult executeCommandInContainer(ContainerName containerName, String[] command) { ProcessResult result = docker.executeInContainerAsRoot(containerName, command); if (! 
result.isSuccess()) { throw new RuntimeException("Container " + containerName.asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, String[] command) { return docker.executeInContainerAsRoot(containerName, command); } @Override public void executeCommandInNetworkNamespace(ContainerName containerName, String[] command) { final PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); final Integer containerPid = docker.getContainer(containerName) .filter(container -> container.state.isRunning()) .map(container -> container.pid) .orElseThrow(() -> new RuntimeException("PID not found for container with name: " + containerName.asString())); final List<String> wrappedCommand = new LinkedList<>(); wrappedCommand.add("sudo"); wrappedCommand.add("-n"); wrappedCommand.add("nsenter"); wrappedCommand.add(String.format("--net=/host/proc/%d/ns/net", containerPid)); wrappedCommand.add("--"); wrappedCommand.addAll(Arrays.asList(command)); try { commandExecutor.accept(wrappedCommand); } catch (Exception e) { logger.error(String.format("Failed to execute %s in network namespace for %s (PID = %d)", Arrays.toString(command), containerName.asString(), containerPid)); throw new RuntimeException(e); } } @Override public void resumeNode(ContainerName containerName) { executeCommandInContainer(containerName, RESUME_NODE_COMMAND); } @Override public void restartVespaOnNode(ContainerName containerName) { executeCommandInContainer(containerName, RESTART_VESPA_ON_NODE_COMMAND); } @Override public void stopServicesOnNode(ContainerName containerName) { executeCommandInContainer(containerName, STOP_NODE_COMMAND); } @Override public Optional<Docker.ContainerStats> getContainerStats(ContainerName containerName) { return docker.getContainerStats(containerName); } @Override public List<Container> getAllManagedContainers() { 
return docker.getAllContainersManagedBy(MANAGER_NAME); } @Override public void deleteUnusedDockerImages() { docker.deleteUnusedDockerImages(); } private void setMetrics(MetricReceiverWrapper metricReceiver) { Dimensions dimensions = new Dimensions.Builder() .add("host", HostName.getLocalhost()) .add("role", "docker").build(); numberOfRunningContainersGauge = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "containers.running"); numberOfRunningContainersGauge.sample(getAllManagedContainers().size()); } private static void runCommand(List<String> command) { try { final Process process = new ProcessBuilder(command) .redirectErrorStream(true) .start(); final String output = CharStreams.toString(new InputStreamReader(process.getInputStream())); final int resultCode = process.waitFor(); if (resultCode != 0) { throw new RuntimeException("Command " + Joiner.on(' ').join(command) + " failed: " + output); } } catch (IOException|InterruptedException e) { throw new RuntimeException(e); } } }
Consider replacing the manual loop with a stream over the varargs array, e.g. `Arrays.asList(docTypes).stream()` (or simply `Arrays.stream(docTypes)`), mapping each name to a `DocType`.
public ContentClusterBuilder docTypes(String ... docTypes) { List<DocType> result = new ArrayList<>(); for (String type : docTypes) { result.add(new DocType(type)); } this.docTypes = result; return this; }
List<DocType> result = new ArrayList<>();
public ContentClusterBuilder docTypes(String ... docTypes) { this.docTypes = Arrays.asList(docTypes).stream(). map(type -> new DocType(type)). collect(Collectors.toList()); return this; }
class DocType { private final String name; private final boolean global; public DocType(String name, boolean global) { this.name = name; this.global = global; } public DocType(String name) { this(name, false); } public String toXml() { return (global ? "<document mode='index' type='" + name + "' global='true'/>" : "<document mode='index' type='" + name + "'/>"); } }
class DocType { private final String name; private final boolean global; public DocType(String name, boolean global) { this.name = name; this.global = global; } public DocType(String name) { this(name, false); } public String toXml() { return (global ? "<document mode='index' type='" + name + "' global='true'/>" : "<document mode='index' type='" + name + "'/>"); } }
Consider changing lambda to method reference: `.map(DocType::toXml)`.
public String getXml() { String xml = "<content version='1.0' id='" + name + "'>\n" + " <redundancy>" + redundancy + "</redundancy>\n" + " <documents>\n" + docTypes.stream().map(type -> type.toXml()).collect(Collectors.joining("\n")) + " </documents>\n" + " <engine>\n" + " <proton>\n" + " <searchable-copies>" + searchableCopies + "</searchable-copies>\n" + getResourceLimitsXml(" ") + " </proton>\n" + " </engine>\n"; if (dispatchXml.isPresent()) { xml += dispatchXml.get(); } return xml + groupXml + "</content>"; }
docTypes.stream().map(type -> type.toXml()).collect(Collectors.joining("\n")) +
public String getXml() { String xml = joinLines("<content version='1.0' id='" + name + "'>", " <redundancy>" + redundancy + "</redundancy>", " <documents>", docTypes.stream().map(DocType::toXml).collect(Collectors.joining("\n")), " </documents>", " <engine>", " <proton>", " <searchable-copies>" + searchableCopies + "</searchable-copies>", getResourceLimitsXml(" "), " </proton>", " </engine>"); if (dispatchXml.isPresent()) { xml += dispatchXml.get(); } return xml + groupXml + "</content>"; }
class DocType { private final String name; private final boolean global; public DocType(String name, boolean global) { this.name = name; this.global = global; } public DocType(String name) { this(name, false); } public String toXml() { return (global ? "<document mode='index' type='" + name + "' global='true'/>" : "<document mode='index' type='" + name + "'/>"); } }
class DocType { private final String name; private final boolean global; public DocType(String name, boolean global) { this.name = name; this.global = global; } public DocType(String name) { this(name, false); } public String toXml() { return (global ? "<document mode='index' type='" + name + "' global='true'/>" : "<document mode='index' type='" + name + "'/>"); } }
Consider having `new ContentClusterBuilder.DocType("global", true),` on a separate line to increase readability
private static ContentCluster createClusterWithGlobalType() throws Exception { return createCluster(new ContentClusterBuilder().docTypes(Arrays.asList(new ContentClusterBuilder.DocType("global", true), new ContentClusterBuilder.DocType("regular"))).getXml(), generateSearchDefinitions(Arrays.asList("global", "regular"))); }
return createCluster(new ContentClusterBuilder().docTypes(Arrays.asList(new ContentClusterBuilder.DocType("global", true),
private static ContentCluster createClusterWithGlobalType() throws Exception { return createCluster(new ContentClusterBuilder().docTypes(Arrays.asList( new ContentClusterBuilder.DocType("global", true), new ContentClusterBuilder.DocType("regular"))).getXml(), createSearchDefinitions("global", "regular")); }
class ContentSearchClusterTest { private static double EPSILON = 0.000001; private static ContentCluster createClusterWithOneDocumentType() throws Exception { return createCluster(new ContentClusterBuilder().getXml()); } private static ContentCluster createClusterWithTwoDocumentType() throws Exception { List<String> docTypes = Arrays.asList("foo", "bar"); return createCluster(new ContentClusterBuilder().docTypes("foo", "bar").getXml(), generateSearchDefinitions(docTypes)); } private static List<String> generateSearchDefinitions(List<String> docTypes) { return docTypes.stream(). map(type -> new com.yahoo.vespa.model.content.utils.SearchDefinitionBuilder().name(type).build()). collect(Collectors.toList()); } private static ProtonConfig getProtonConfig(ContentCluster cluster) { ProtonConfig.Builder protonCfgBuilder = new ProtonConfig.Builder(); cluster.getSearch().getConfig(protonCfgBuilder); return new ProtonConfig(protonCfgBuilder); } private static void assertProtonResourceLimits(double expDiskLimit, double expMemoryLimits, String clusterXml) throws Exception { ProtonConfig cfg = getProtonConfig(createCluster(clusterXml)); assertEquals(expDiskLimit, cfg.writefilter().disklimit(), EPSILON); assertEquals(expMemoryLimits, cfg.writefilter().memorylimit(), EPSILON); } @Test public void requireThatProtonInitializeThreadsIsSet() throws Exception { assertEquals(2, getProtonConfig(createClusterWithOneDocumentType()).initialize().threads()); assertEquals(3, getProtonConfig(createClusterWithTwoDocumentType()).initialize().threads()); } @Test public void requireThatProtonResourceLimitsCanBeSet() throws Exception { assertProtonResourceLimits(0.88, 0.77, new ContentClusterBuilder().protonDiskLimit(0.88).protonMemoryLimit(0.77).getXml()); } @Test public void requireThatOnlyDiskLimitCanBeSet() throws Exception { assertProtonResourceLimits(0.88, 0.8, new ContentClusterBuilder().protonDiskLimit(0.88).getXml()); } @Test public void requireThatOnlyMemoryLimitCanBeSet() throws 
Exception { assertProtonResourceLimits(0.8, 0.77, new ContentClusterBuilder().protonMemoryLimit(0.77).getXml()); } @Test public void requireThatGloballyDistributedDocumentTypeIsTaggedAsSuch() throws Exception { ProtonConfig cfg = getProtonConfig(createClusterWithGlobalType()); assertEquals(2, cfg.documentdb().size()); assertDocumentDb("global", true, cfg.documentdb(0)); assertDocumentDb("regular", false, cfg.documentdb(1)); } private static void assertDocumentDb(String expName, boolean expGlobal, ProtonConfig.Documentdb db) { assertEquals(expName, db.inputdoctypename()); assertEquals(expGlobal, db.global()); } }
class ContentSearchClusterTest { private static double EPSILON = 0.000001; private static ContentCluster createClusterWithOneDocumentType() throws Exception { return createCluster(new ContentClusterBuilder().getXml()); } private static ContentCluster createClusterWithTwoDocumentType() throws Exception { return createCluster(new ContentClusterBuilder().docTypes("foo", "bar").getXml(), createSearchDefinitions("foo", "bar")); } private static ProtonConfig getProtonConfig(ContentCluster cluster) { ProtonConfig.Builder protonCfgBuilder = new ProtonConfig.Builder(); cluster.getSearch().getConfig(protonCfgBuilder); return new ProtonConfig(protonCfgBuilder); } private static void assertProtonResourceLimits(double expDiskLimit, double expMemoryLimits, String clusterXml) throws Exception { ProtonConfig cfg = getProtonConfig(createCluster(clusterXml)); assertEquals(expDiskLimit, cfg.writefilter().disklimit(), EPSILON); assertEquals(expMemoryLimits, cfg.writefilter().memorylimit(), EPSILON); } @Test public void requireThatProtonInitializeThreadsIsSet() throws Exception { assertEquals(2, getProtonConfig(createClusterWithOneDocumentType()).initialize().threads()); assertEquals(3, getProtonConfig(createClusterWithTwoDocumentType()).initialize().threads()); } @Test public void requireThatProtonResourceLimitsCanBeSet() throws Exception { assertProtonResourceLimits(0.88, 0.77, new ContentClusterBuilder().protonDiskLimit(0.88).protonMemoryLimit(0.77).getXml()); } @Test public void requireThatOnlyDiskLimitCanBeSet() throws Exception { assertProtonResourceLimits(0.88, 0.8, new ContentClusterBuilder().protonDiskLimit(0.88).getXml()); } @Test public void requireThatOnlyMemoryLimitCanBeSet() throws Exception { assertProtonResourceLimits(0.8, 0.77, new ContentClusterBuilder().protonMemoryLimit(0.77).getXml()); } @Test public void requireThatGloballyDistributedDocumentTypeIsTaggedAsSuch() throws Exception { ProtonConfig cfg = getProtonConfig(createClusterWithGlobalType()); assertEquals(2, 
cfg.documentdb().size()); assertDocumentDb("global", true, cfg.documentdb(0)); assertDocumentDb("regular", false, cfg.documentdb(1)); } private static void assertDocumentDb(String expName, boolean expGlobal, ProtonConfig.Documentdb db) { assertEquals(expName, db.inputdoctypename()); assertEquals(expGlobal, db.global()); } }
No need for `Arrays.asList` here — the method accepts varargs.
/** Creates a cluster with one globally distributed and one regular document type. */
private static ContentCluster createClusterWithGlobalType() throws Exception {
    List<ContentClusterBuilder.DocType> docTypes = Arrays.asList(
            new ContentClusterBuilder.DocType("global", true),
            new ContentClusterBuilder.DocType("regular"));
    return createCluster(new ContentClusterBuilder().docTypes(docTypes).getXml(),
                         generateSearchDefinitions(Arrays.asList("global", "regular")));
}
return createCluster(new ContentClusterBuilder().docTypes(Arrays.asList(new ContentClusterBuilder.DocType("global", true),
/** Creates a cluster with one globally distributed and one regular document type. */
private static ContentCluster createClusterWithGlobalType() throws Exception {
    String clusterXml = new ContentClusterBuilder()
            .docTypes(Arrays.asList(new ContentClusterBuilder.DocType("global", true),
                                    new ContentClusterBuilder.DocType("regular")))
            .getXml();
    return createCluster(clusterXml, createSearchDefinitions("global", "regular"));
}
class ContentSearchClusterTest { private static double EPSILON = 0.000001; private static ContentCluster createClusterWithOneDocumentType() throws Exception { return createCluster(new ContentClusterBuilder().getXml()); } private static ContentCluster createClusterWithTwoDocumentType() throws Exception { List<String> docTypes = Arrays.asList("foo", "bar"); return createCluster(new ContentClusterBuilder().docTypes("foo", "bar").getXml(), generateSearchDefinitions(docTypes)); } private static List<String> generateSearchDefinitions(List<String> docTypes) { return docTypes.stream(). map(type -> new com.yahoo.vespa.model.content.utils.SearchDefinitionBuilder().name(type).build()). collect(Collectors.toList()); } private static ProtonConfig getProtonConfig(ContentCluster cluster) { ProtonConfig.Builder protonCfgBuilder = new ProtonConfig.Builder(); cluster.getSearch().getConfig(protonCfgBuilder); return new ProtonConfig(protonCfgBuilder); } private static void assertProtonResourceLimits(double expDiskLimit, double expMemoryLimits, String clusterXml) throws Exception { ProtonConfig cfg = getProtonConfig(createCluster(clusterXml)); assertEquals(expDiskLimit, cfg.writefilter().disklimit(), EPSILON); assertEquals(expMemoryLimits, cfg.writefilter().memorylimit(), EPSILON); } @Test public void requireThatProtonInitializeThreadsIsSet() throws Exception { assertEquals(2, getProtonConfig(createClusterWithOneDocumentType()).initialize().threads()); assertEquals(3, getProtonConfig(createClusterWithTwoDocumentType()).initialize().threads()); } @Test public void requireThatProtonResourceLimitsCanBeSet() throws Exception { assertProtonResourceLimits(0.88, 0.77, new ContentClusterBuilder().protonDiskLimit(0.88).protonMemoryLimit(0.77).getXml()); } @Test public void requireThatOnlyDiskLimitCanBeSet() throws Exception { assertProtonResourceLimits(0.88, 0.8, new ContentClusterBuilder().protonDiskLimit(0.88).getXml()); } @Test public void requireThatOnlyMemoryLimitCanBeSet() throws 
Exception { assertProtonResourceLimits(0.8, 0.77, new ContentClusterBuilder().protonMemoryLimit(0.77).getXml()); } @Test public void requireThatGloballyDistributedDocumentTypeIsTaggedAsSuch() throws Exception { ProtonConfig cfg = getProtonConfig(createClusterWithGlobalType()); assertEquals(2, cfg.documentdb().size()); assertDocumentDb("global", true, cfg.documentdb(0)); assertDocumentDb("regular", false, cfg.documentdb(1)); } private static void assertDocumentDb(String expName, boolean expGlobal, ProtonConfig.Documentdb db) { assertEquals(expName, db.inputdoctypename()); assertEquals(expGlobal, db.global()); } }
class ContentSearchClusterTest { private static double EPSILON = 0.000001; private static ContentCluster createClusterWithOneDocumentType() throws Exception { return createCluster(new ContentClusterBuilder().getXml()); } private static ContentCluster createClusterWithTwoDocumentType() throws Exception { return createCluster(new ContentClusterBuilder().docTypes("foo", "bar").getXml(), createSearchDefinitions("foo", "bar")); } private static ProtonConfig getProtonConfig(ContentCluster cluster) { ProtonConfig.Builder protonCfgBuilder = new ProtonConfig.Builder(); cluster.getSearch().getConfig(protonCfgBuilder); return new ProtonConfig(protonCfgBuilder); } private static void assertProtonResourceLimits(double expDiskLimit, double expMemoryLimits, String clusterXml) throws Exception { ProtonConfig cfg = getProtonConfig(createCluster(clusterXml)); assertEquals(expDiskLimit, cfg.writefilter().disklimit(), EPSILON); assertEquals(expMemoryLimits, cfg.writefilter().memorylimit(), EPSILON); } @Test public void requireThatProtonInitializeThreadsIsSet() throws Exception { assertEquals(2, getProtonConfig(createClusterWithOneDocumentType()).initialize().threads()); assertEquals(3, getProtonConfig(createClusterWithTwoDocumentType()).initialize().threads()); } @Test public void requireThatProtonResourceLimitsCanBeSet() throws Exception { assertProtonResourceLimits(0.88, 0.77, new ContentClusterBuilder().protonDiskLimit(0.88).protonMemoryLimit(0.77).getXml()); } @Test public void requireThatOnlyDiskLimitCanBeSet() throws Exception { assertProtonResourceLimits(0.88, 0.8, new ContentClusterBuilder().protonDiskLimit(0.88).getXml()); } @Test public void requireThatOnlyMemoryLimitCanBeSet() throws Exception { assertProtonResourceLimits(0.8, 0.77, new ContentClusterBuilder().protonMemoryLimit(0.77).getXml()); } @Test public void requireThatGloballyDistributedDocumentTypeIsTaggedAsSuch() throws Exception { ProtonConfig cfg = getProtonConfig(createClusterWithGlobalType()); assertEquals(2, 
cfg.documentdb().size()); assertDocumentDb("global", true, cfg.documentdb(0)); assertDocumentDb("regular", false, cfg.documentdb(1)); } private static void assertDocumentDb(String expName, boolean expGlobal, ProtonConfig.Documentdb db) { assertEquals(expName, db.inputdoctypename()); assertEquals(expGlobal, db.global()); } }
Why not? Isn't one of the scenarios we want to cover "an intermittent network issue causes many nodes to appear down"?
/**
 * Returns whether failing the given node should be suppressed because too many
 * nodes have already been failed within the policy's throttle window.
 * Docker containers are excluded from the count (a problem on a single Docker
 * host would otherwise be counted once per container).
 */
private boolean throttle(Node node) {
    if (throttlePolicy == ThrottlePolicy.disabled) return false;

    Instant windowStart = clock.instant().minus(throttlePolicy.throttleWindow);
    List<Node> countedNodes = nodeRepository().getNodes().stream()
            .filter(n -> n.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
            .collect(Collectors.toList());
    long failedInWindow = countedNodes.stream()
            .filter(n -> n.history().event(History.Event.Type.failed)
                          .map(failedEvent -> failedEvent.at().isAfter(windowStart))
                          .orElse(false))
            .count();

    // Allowed budget is the larger of the fractional and the absolute minimum limit.
    boolean throttling = failedInWindow >= Math.max(countedNodes.size() * throttlePolicy.fractionAllowedToFail,
                                                    throttlePolicy.minimumAllowedToFail);
    if (throttling) {
        log.info(String.format("Want to fail node %s, but throttling is in effect: %s",
                               node.hostname(), throttlePolicy.toString()));
    }
    return throttling;
}
/**
 * Returns whether failing the given node should be suppressed because too many
 * nodes have already been failed within the policy's throttle window.
 * Docker containers are excluded from the count (a problem on a single Docker
 * host would otherwise be counted once per container).
 */
private boolean throttle(Node node) {
    if (throttlePolicy == ThrottlePolicy.disabled) return false;

    Instant windowStart = clock.instant().minus(throttlePolicy.throttleWindow);
    List<Node> countedNodes = nodeRepository().getNodes().stream()
            .filter(n -> n.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
            .collect(Collectors.toList());
    long failedInWindow = countedNodes.stream()
            .filter(n -> n.history().event(History.Event.Type.failed)
                          .map(failedEvent -> failedEvent.at().isAfter(windowStart))
                          .orElse(false))
            .count();

    // Allowed budget is the larger of the fractional and the absolute minimum limit.
    boolean throttling = failedInWindow >= Math.max(countedNodes.size() * throttlePolicy.fractionAllowedToFail,
                                                    throttlePolicy.minimumAllowedToFail);
    if (throttling) {
        log.info(String.format("Want to fail node %s, but throttling is in effect: %s",
                               node.hostname(), throttlePolicy.toHumanReadableString()));
    }
    return throttling;
}
/**
 * Maintainer that moves nodes to the failed state, subject to a ThrottlePolicy:
 * ready nodes that have stopped sending config requests, ready nodes with a
 * recorded hardware failure, and active nodes that have been down longer than
 * downTimeLimit. Active-node failing only proceeds when the node can be
 * redeployed without it; otherwise the failing is rolled back.
 */
class NodeFailer extends Maintainer { private static final Logger log = Logger.getLogger(NodeFailer.class.getName()); private static final Duration nodeRequestInterval = Duration.ofMinutes(10); /** Provides information about the status of ready hosts */ private final HostLivenessTracker hostLivenessTracker; /** Provides (more accurate) information about the status of active hosts */ private final ServiceMonitor serviceMonitor; private final Deployer deployer; private final Duration downTimeLimit; private final Clock clock; private final Orchestrator orchestrator; private final Instant constructionTime; private final ThrottlePolicy throttlePolicy; public NodeFailer(Deployer deployer, HostLivenessTracker hostLivenessTracker, ServiceMonitor serviceMonitor, NodeRepository nodeRepository, Duration downTimeLimit, Clock clock, Orchestrator orchestrator, ThrottlePolicy throttlePolicy) { super(nodeRepository, min(downTimeLimit.dividedBy(2), Duration.ofMinutes(5))); this.deployer = deployer; this.hostLivenessTracker = hostLivenessTracker; this.serviceMonitor = serviceMonitor; this.downTimeLimit = downTimeLimit; this.clock = clock; this.orchestrator = orchestrator; this.constructionTime = clock.instant(); this.throttlePolicy = throttlePolicy; } @Override protected void maintain() { updateNodeLivenessEventsForReadyNodes(); for (Node node : readyNodesWhichAreDead()) { if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER || node.type() == NodeType.host) continue; if (!throttle(node)) nodeRepository().fail(node.hostname(), "Not receiving config requests from node"); } for (Node node : readyNodesWithHardwareFailure()) if (!throttle(node)) nodeRepository().fail(node.hostname(), "Node has hardware failure"); for (Node node : determineActiveNodeDownStatus()) { Instant graceTimeEnd = node.history().event(History.Event.Type.down).get().at().plus(downTimeLimit); if (graceTimeEnd.isBefore(clock.instant()) && !
// Fail an active node only when its grace period has expired, its application is not suspended,
// and failing is allowed for its node type (see failAllowedFor below).
applicationSuspended(node) && failAllowedFor(node.type())) if (!throttle(node)) failActive(node, "Node has been down longer than " + downTimeLimit); } } private void updateNodeLivenessEventsForReadyNodes() { try (Mutex lock = nodeRepository().lockUnallocated()) { for (Node node : nodeRepository().getNodes(Node.State.ready)) { Optional<Instant> lastLocalRequest = hostLivenessTracker.lastRequestFrom(node.hostname()); if ( ! lastLocalRequest.isPresent()) continue; Optional<History.Event> recordedRequest = node.history().event(History.Event.Type.requested); if ( ! recordedRequest.isPresent() || recordedRequest.get().at().isBefore(lastLocalRequest.get())) { History updatedHistory = node.history().with(new History.Event(History.Event.Type.requested, lastLocalRequest.get())); nodeRepository().write(node.with(updatedHistory)); } } } } private List<Node> readyNodesWhichAreDead() { if (constructionTime.isAfter(clock.instant().minus(nodeRequestInterval).minus(nodeRequestInterval) )) return Collections.emptyList(); Instant oldestAcceptableRequestTime = clock.instant().minus(downTimeLimit).minus(nodeRequestInterval); return nodeRepository().getNodes(Node.State.ready).stream() .filter(node -> wasMadeReadyBefore(oldestAcceptableRequestTime, node)) .filter(node -> ! hasRecordedRequestAfter(oldestAcceptableRequestTime, node)) .collect(Collectors.toList()); } private boolean wasMadeReadyBefore(Instant instant, Node node) { Optional<History.Event> readiedEvent = node.history().event(History.Event.Type.readied); if ( ! readiedEvent.isPresent()) return false; return readiedEvent.get().at().isBefore(instant); } private boolean hasRecordedRequestAfter(Instant instant, Node node) { Optional<History.Event> lastRequest = node.history().event(History.Event.Type.requested); if ( !
// A node with no recorded config request cannot have one after 'instant'.
lastRequest.isPresent()) return false; return lastRequest.get().at().isAfter(instant); } private List<Node> readyNodesWithHardwareFailure() { return nodeRepository().getNodes(Node.State.ready).stream() .filter(node -> node.status().hardwareFailure().isPresent()) .collect(Collectors.toList()); } private boolean applicationSuspended(Node node) { try { return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner()) == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN; } catch (ApplicationIdNotFoundException e) { return false; } } /** * We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected * unless the node is replaced. * However, nodes of other types are not replaced (because all of the type are used by a single application), * so we only allow one to be in failed at any point in time to protect against runaway failing. */ private boolean failAllowedFor(NodeType nodeType) { if (nodeType == NodeType.tenant || nodeType == NodeType.host) return true; return nodeRepository().getNodes(nodeType, Node.State.failed).size() == 0; } /** * If the node is positively DOWN, and there is no "down" history record, we add it. * If the node is positively UP we remove any "down" history record. * * @return a list of all nodes which are positively currently in the down state */ private List<Node> determineActiveNodeDownStatus() { List<Node> downNodes = new ArrayList<>(); for (ApplicationInstance<ServiceMonitorStatus> application : serviceMonitor.queryStatusOfAllApplicationInstances().values()) { for (ServiceCluster<ServiceMonitorStatus> cluster : application.serviceClusters()) { for (ServiceInstance<ServiceMonitorStatus> service : cluster.serviceInstances()) { Optional<Node> node = nodeRepository().getNode(service.hostName().s(), Node.State.active); if ( !
// Skip services whose host is not currently an active node in the repository.
node.isPresent()) continue; if (service.serviceStatus().equals(ServiceMonitorStatus.DOWN)) downNodes.add(recordAsDown(node.get())); else if (service.serviceStatus().equals(ServiceMonitorStatus.UP)) clearDownRecord(node.get()); } } } return downNodes; } /** * Record a node as down if not already recorded and returns the node in the new state. * This assumes the node is found in the node * repo and that the node is allocated. If we get here otherwise something is truly odd. */ private Node recordAsDown(Node node) { if (node.history().event(History.Event.Type.down).isPresent()) return node; try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) { node = nodeRepository().getNode(node.hostname(), Node.State.active).get(); return nodeRepository().write(node.downAt(clock.instant())); } } private void clearDownRecord(Node node) { if ( ! node.history().event(History.Event.Type.down).isPresent()) return; try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) { node = nodeRepository().getNode(node.hostname(), Node.State.active).get(); nodeRepository().write(node.up()); } } /** * Called when a node should be moved to the failed state: Do that if it seems safe, * which is when the node repo has available capacity to replace the node (and all its tenant nodes if host). * Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken. * * @return whether node was successfully failed */ private boolean failActive(Node node, String reason) { Optional<Deployment> deployment = deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30)); if ( !
// Without a local active deployment we cannot redeploy the application, so do not fail the node.
deployment.isPresent()) return false; try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) { boolean allTenantNodesFailedOutSuccessfully = true; for (Node failingTenantNode : nodeRepository().getChildNodes(node.hostname())) { if (failingTenantNode.state() == Node.State.active) { allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reason); } else { nodeRepository().fail(failingTenantNode.hostname(), reason); } } if (! allTenantNodesFailedOutSuccessfully) return false; node = nodeRepository().fail(node.hostname(), reason); try { deployment.get().activate(); return true; } catch (RuntimeException e) { nodeRepository().reactivate(node.hostname()); log.log(Level.WARNING, "Attempted to fail " + node + " for " + node.allocation().get().owner() + ", but redeploying without the node failed", e); return false; } } } /** Returns the name of this maintainer */ @Override public String toString() { return "Node failer"; } public enum ThrottlePolicy { hosted(Duration.ofDays(1), 0.01, 2), disabled(Duration.ZERO, 0, 0); public final Duration throttleWindow; public final double fractionAllowedToFail; public final int minimumAllowedToFail; ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) { this.throttleWindow = throttleWindow; this.fractionAllowedToFail = fractionAllowedToFail; this.minimumAllowedToFail = minimumAllowedToFail; } @Override public String toString() { return String.format("Max %.0f%% or %d nodes can fail over a period of %s", fractionAllowedToFail*100, minimumAllowedToFail, throttleWindow); } } }
/**
 * Maintainer that moves nodes to the failed state, subject to a ThrottlePolicy:
 * ready nodes that have stopped sending config requests, ready nodes with a
 * recorded hardware failure, and active nodes that have been down longer than
 * downTimeLimit. Active-node failing only proceeds when the node can be
 * redeployed without it; otherwise the failing is rolled back.
 */
class NodeFailer extends Maintainer { private static final Logger log = Logger.getLogger(NodeFailer.class.getName()); private static final Duration nodeRequestInterval = Duration.ofMinutes(10); /** Provides information about the status of ready hosts */ private final HostLivenessTracker hostLivenessTracker; /** Provides (more accurate) information about the status of active hosts */ private final ServiceMonitor serviceMonitor; private final Deployer deployer; private final Duration downTimeLimit; private final Clock clock; private final Orchestrator orchestrator; private final Instant constructionTime; private final ThrottlePolicy throttlePolicy; public NodeFailer(Deployer deployer, HostLivenessTracker hostLivenessTracker, ServiceMonitor serviceMonitor, NodeRepository nodeRepository, Duration downTimeLimit, Clock clock, Orchestrator orchestrator, ThrottlePolicy throttlePolicy) { super(nodeRepository, min(downTimeLimit.dividedBy(2), Duration.ofMinutes(5))); this.deployer = deployer; this.hostLivenessTracker = hostLivenessTracker; this.serviceMonitor = serviceMonitor; this.downTimeLimit = downTimeLimit; this.clock = clock; this.orchestrator = orchestrator; this.constructionTime = clock.instant(); this.throttlePolicy = throttlePolicy; } @Override protected void maintain() { updateNodeLivenessEventsForReadyNodes(); for (Node node : readyNodesWhichAreDead()) { if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER || node.type() == NodeType.host) continue; if (!throttle(node)) nodeRepository().fail(node.hostname(), "Not receiving config requests from node"); } for (Node node : readyNodesWithHardwareFailure()) if (!throttle(node)) nodeRepository().fail(node.hostname(), "Node has hardware failure"); for (Node node : determineActiveNodeDownStatus()) { Instant graceTimeEnd = node.history().event(History.Event.Type.down).get().at().plus(downTimeLimit); if (graceTimeEnd.isBefore(clock.instant()) && !
// Fail an active node only when its grace period has expired, its application is not suspended,
// and failing is allowed for its node type (see failAllowedFor below).
applicationSuspended(node) && failAllowedFor(node.type())) if (!throttle(node)) failActive(node, "Node has been down longer than " + downTimeLimit); } } private void updateNodeLivenessEventsForReadyNodes() { try (Mutex lock = nodeRepository().lockUnallocated()) { for (Node node : nodeRepository().getNodes(Node.State.ready)) { Optional<Instant> lastLocalRequest = hostLivenessTracker.lastRequestFrom(node.hostname()); if ( ! lastLocalRequest.isPresent()) continue; Optional<History.Event> recordedRequest = node.history().event(History.Event.Type.requested); if ( ! recordedRequest.isPresent() || recordedRequest.get().at().isBefore(lastLocalRequest.get())) { History updatedHistory = node.history().with(new History.Event(History.Event.Type.requested, lastLocalRequest.get())); nodeRepository().write(node.with(updatedHistory)); } } } } private List<Node> readyNodesWhichAreDead() { if (constructionTime.isAfter(clock.instant().minus(nodeRequestInterval).minus(nodeRequestInterval) )) return Collections.emptyList(); Instant oldestAcceptableRequestTime = clock.instant().minus(downTimeLimit).minus(nodeRequestInterval); return nodeRepository().getNodes(Node.State.ready).stream() .filter(node -> wasMadeReadyBefore(oldestAcceptableRequestTime, node)) .filter(node -> ! hasRecordedRequestAfter(oldestAcceptableRequestTime, node)) .collect(Collectors.toList()); } private boolean wasMadeReadyBefore(Instant instant, Node node) { Optional<History.Event> readiedEvent = node.history().event(History.Event.Type.readied); if ( ! readiedEvent.isPresent()) return false; return readiedEvent.get().at().isBefore(instant); } private boolean hasRecordedRequestAfter(Instant instant, Node node) { Optional<History.Event> lastRequest = node.history().event(History.Event.Type.requested); if ( !
// A node with no recorded config request cannot have one after 'instant'.
lastRequest.isPresent()) return false; return lastRequest.get().at().isAfter(instant); } private List<Node> readyNodesWithHardwareFailure() { return nodeRepository().getNodes(Node.State.ready).stream() .filter(node -> node.status().hardwareFailure().isPresent()) .collect(Collectors.toList()); } private boolean applicationSuspended(Node node) { try { return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner()) == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN; } catch (ApplicationIdNotFoundException e) { return false; } } /** * We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected * unless the node is replaced. * However, nodes of other types are not replaced (because all of the type are used by a single application), * so we only allow one to be in failed at any point in time to protect against runaway failing. */ private boolean failAllowedFor(NodeType nodeType) { if (nodeType == NodeType.tenant || nodeType == NodeType.host) return true; return nodeRepository().getNodes(nodeType, Node.State.failed).size() == 0; } /** * If the node is positively DOWN, and there is no "down" history record, we add it. * If the node is positively UP we remove any "down" history record. * * @return a list of all nodes which are positively currently in the down state */ private List<Node> determineActiveNodeDownStatus() { List<Node> downNodes = new ArrayList<>(); for (ApplicationInstance<ServiceMonitorStatus> application : serviceMonitor.queryStatusOfAllApplicationInstances().values()) { for (ServiceCluster<ServiceMonitorStatus> cluster : application.serviceClusters()) { for (ServiceInstance<ServiceMonitorStatus> service : cluster.serviceInstances()) { Optional<Node> node = nodeRepository().getNode(service.hostName().s(), Node.State.active); if ( !
// Skip services whose host is not currently an active node in the repository.
node.isPresent()) continue; if (service.serviceStatus().equals(ServiceMonitorStatus.DOWN)) downNodes.add(recordAsDown(node.get())); else if (service.serviceStatus().equals(ServiceMonitorStatus.UP)) clearDownRecord(node.get()); } } } return downNodes; } /** * Record a node as down if not already recorded and returns the node in the new state. * This assumes the node is found in the node * repo and that the node is allocated. If we get here otherwise something is truly odd. */ private Node recordAsDown(Node node) { if (node.history().event(History.Event.Type.down).isPresent()) return node; try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) { node = nodeRepository().getNode(node.hostname(), Node.State.active).get(); return nodeRepository().write(node.downAt(clock.instant())); } } private void clearDownRecord(Node node) { if ( ! node.history().event(History.Event.Type.down).isPresent()) return; try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) { node = nodeRepository().getNode(node.hostname(), Node.State.active).get(); nodeRepository().write(node.up()); } } /** * Called when a node should be moved to the failed state: Do that if it seems safe, * which is when the node repo has available capacity to replace the node (and all its tenant nodes if host). * Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken. * * @return whether node was successfully failed */ private boolean failActive(Node node, String reason) { Optional<Deployment> deployment = deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30)); if ( !
// Without a local active deployment we cannot redeploy the application, so do not fail the node.
deployment.isPresent()) return false; try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) { boolean allTenantNodesFailedOutSuccessfully = true; for (Node failingTenantNode : nodeRepository().getChildNodes(node.hostname())) { if (failingTenantNode.state() == Node.State.active) { allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reason); } else { nodeRepository().fail(failingTenantNode.hostname(), reason); } } if (! allTenantNodesFailedOutSuccessfully) return false; node = nodeRepository().fail(node.hostname(), reason); try { deployment.get().activate(); return true; } catch (RuntimeException e) { nodeRepository().reactivate(node.hostname()); log.log(Level.WARNING, "Attempted to fail " + node + " for " + node.allocation().get().owner() + ", but redeploying without the node failed", e); return false; } } } /** Returns the name of this maintainer */ @Override public String toString() { return "Node failer"; } public enum ThrottlePolicy { hosted(Duration.ofDays(1), 0.01, 2), disabled(Duration.ZERO, 0, 0); public final Duration throttleWindow; public final double fractionAllowedToFail; public final int minimumAllowedToFail; ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) { this.throttleWindow = throttleWindow; this.fractionAllowedToFail = fractionAllowedToFail; this.minimumAllowedToFail = minimumAllowedToFail; } public String toHumanReadableString() { return String.format("Max %.0f%% or %d nodes can fail over a period of %s", fractionAllowedToFail*100, minimumAllowedToFail, throttleWindow); } } }
Any hardware/network issue on the host will also affect all of its containers. We don't want to start throttling just because a single Docker host (and all of its containers) fails.
/**
 * Returns whether failing the given node should be suppressed because too many
 * nodes have already been failed within the policy's throttle window.
 * Docker containers are excluded from the count (a problem on a single Docker
 * host would otherwise be counted once per container).
 */
private boolean throttle(Node node) {
    if (throttlePolicy == ThrottlePolicy.disabled) return false;

    Instant windowStart = clock.instant().minus(throttlePolicy.throttleWindow);
    List<Node> countedNodes = nodeRepository().getNodes().stream()
            .filter(n -> n.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
            .collect(Collectors.toList());
    long failedInWindow = countedNodes.stream()
            .filter(n -> n.history().event(History.Event.Type.failed)
                          .map(failedEvent -> failedEvent.at().isAfter(windowStart))
                          .orElse(false))
            .count();

    // Allowed budget is the larger of the fractional and the absolute minimum limit.
    boolean throttling = failedInWindow >= Math.max(countedNodes.size() * throttlePolicy.fractionAllowedToFail,
                                                    throttlePolicy.minimumAllowedToFail);
    if (throttling) {
        log.info(String.format("Want to fail node %s, but throttling is in effect: %s",
                               node.hostname(), throttlePolicy.toString()));
    }
    return throttling;
}
/**
 * Returns whether failing the given node should be suppressed because too many
 * nodes have already been failed within the policy's throttle window.
 * Docker containers are excluded from the count (a problem on a single Docker
 * host would otherwise be counted once per container).
 */
private boolean throttle(Node node) {
    if (throttlePolicy == ThrottlePolicy.disabled) return false;

    Instant windowStart = clock.instant().minus(throttlePolicy.throttleWindow);
    List<Node> countedNodes = nodeRepository().getNodes().stream()
            .filter(n -> n.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
            .collect(Collectors.toList());
    long failedInWindow = countedNodes.stream()
            .filter(n -> n.history().event(History.Event.Type.failed)
                          .map(failedEvent -> failedEvent.at().isAfter(windowStart))
                          .orElse(false))
            .count();

    // Allowed budget is the larger of the fractional and the absolute minimum limit.
    boolean throttling = failedInWindow >= Math.max(countedNodes.size() * throttlePolicy.fractionAllowedToFail,
                                                    throttlePolicy.minimumAllowedToFail);
    if (throttling) {
        log.info(String.format("Want to fail node %s, but throttling is in effect: %s",
                               node.hostname(), throttlePolicy.toHumanReadableString()));
    }
    return throttling;
}
/**
 * Maintenance job which moves nodes to the failed state when they are dead (not sending
 * config requests), have a detected hardware failure, or have been down longer than the
 * configured limit -- all subject to a {@link ThrottlePolicy}.
 */
class NodeFailer extends Maintainer {

    private static final Logger log = Logger.getLogger(NodeFailer.class.getName());
    // Expected interval between config requests from ready nodes
    private static final Duration nodeRequestInterval = Duration.ofMinutes(10);

    /** Provides information about the status of ready hosts */
    private final HostLivenessTracker hostLivenessTracker;

    /** Provides (more accurate) information about the status of active hosts */
    private final ServiceMonitor serviceMonitor;

    private final Deployer deployer;
    private final Duration downTimeLimit;
    private final Clock clock;
    private final Orchestrator orchestrator;
    private final Instant constructionTime;
    private final ThrottlePolicy throttlePolicy;

    public NodeFailer(Deployer deployer, HostLivenessTracker hostLivenessTracker,
                      ServiceMonitor serviceMonitor, NodeRepository nodeRepository,
                      Duration downTimeLimit, Clock clock, Orchestrator orchestrator,
                      ThrottlePolicy throttlePolicy) {
        // Run at least twice within downTimeLimit (capped at 5 minutes) so down nodes are caught in time
        super(nodeRepository, min(downTimeLimit.dividedBy(2), Duration.ofMinutes(5)));
        this.deployer = deployer;
        this.hostLivenessTracker = hostLivenessTracker;
        this.serviceMonitor = serviceMonitor;
        this.downTimeLimit = downTimeLimit;
        this.clock = clock;
        this.orchestrator = orchestrator;
        this.constructionTime = clock.instant();
        this.throttlePolicy = throttlePolicy;
    }

    @Override
    protected void maintain() {
        // 1. Record the latest config request seen from each ready node
        updateNodeLivenessEventsForReadyNodes();

        // 2. Fail ready nodes which are no longer requesting config
        //    (Docker containers and hosts are exempt from this check)
        for (Node node : readyNodesWhichAreDead()) {
            if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER || node.type() == NodeType.host) continue;
            if (!throttle(node)) nodeRepository().fail(node.hostname(), "Not receiving config requests from node");
        }

        // 3. Fail ready nodes with a recorded hardware failure
        for (Node node : readyNodesWithHardwareFailure())
            if (!throttle(node)) nodeRepository().fail(node.hostname(), "Node has hardware failure");

        // 4. Fail active nodes which have been down past the grace period,
        //    unless the owning application is suspended or failing is not allowed for the type
        for (Node node : determineActiveNodeDownStatus()) {
            Instant graceTimeEnd = node.history().event(History.Event.Type.down).get().at().plus(downTimeLimit);
            if (graceTimeEnd.isBefore(clock.instant()) && ! applicationSuspended(node) && failAllowedFor(node.type()))
                if (!throttle(node)) failActive(node, "Node has been down longer than " + downTimeLimit);
        }
    }

    /** Writes a "requested" history event for each ready node which has made a newer config request. */
    private void updateNodeLivenessEventsForReadyNodes() {
        try (Mutex lock = nodeRepository().lockUnallocated()) {
            for (Node node : nodeRepository().getNodes(Node.State.ready)) {
                Optional<Instant> lastLocalRequest = hostLivenessTracker.lastRequestFrom(node.hostname());
                if ( ! lastLocalRequest.isPresent()) continue;
                Optional<History.Event> recordedRequest = node.history().event(History.Event.Type.requested);
                if ( ! recordedRequest.isPresent() || recordedRequest.get().at().isBefore(lastLocalRequest.get())) {
                    History updatedHistory = node.history().with(new History.Event(History.Event.Type.requested, lastLocalRequest.get()));
                    nodeRepository().write(node.with(updatedHistory));
                }
            }
        }
    }

    /** Returns ready nodes which have not made a config request recently enough. */
    private List<Node> readyNodesWhichAreDead() {
        // Give the world 2 node request intervals after this failer was constructed before
        // we start flagging nodes, so a restart of this process does not cause false positives
        if (constructionTime.isAfter(clock.instant().minus(nodeRequestInterval).minus(nodeRequestInterval) ))
            return Collections.emptyList();
        Instant oldestAcceptableRequestTime = clock.instant().minus(downTimeLimit).minus(nodeRequestInterval);
        return nodeRepository().getNodes(Node.State.ready).stream()
                .filter(node -> wasMadeReadyBefore(oldestAcceptableRequestTime, node))
                .filter(node -> ! hasRecordedRequestAfter(oldestAcceptableRequestTime, node))
                .collect(Collectors.toList());
    }

    private boolean wasMadeReadyBefore(Instant instant, Node node) {
        Optional<History.Event> readiedEvent = node.history().event(History.Event.Type.readied);
        if ( ! readiedEvent.isPresent()) return false;
        return readiedEvent.get().at().isBefore(instant);
    }

    private boolean hasRecordedRequestAfter(Instant instant, Node node) {
        Optional<History.Event> lastRequest = node.history().event(History.Event.Type.requested);
        if ( ! lastRequest.isPresent()) return false;
        return lastRequest.get().at().isAfter(instant);
    }

    private List<Node> readyNodesWithHardwareFailure() {
        return nodeRepository().getNodes(Node.State.ready).stream()
                .filter(node -> node.status().hardwareFailure().isPresent())
                .collect(Collectors.toList());
    }

    private boolean applicationSuspended(Node node) {
        try {
            return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner())
                   == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN;
        } catch (ApplicationIdNotFoundException e) {
            // An application the orchestrator does not know about is treated as not suspended
            return false;
        }
    }

    /**
     * We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected
     * unless the node is replaced.
     * However, nodes of other types are not replaced (because all of the type are used by a single application),
     * so we only allow one to be in failed at any point in time to protect against runaway failing.
     */
    private boolean failAllowedFor(NodeType nodeType) {
        if (nodeType == NodeType.tenant || nodeType == NodeType.host) return true;
        return nodeRepository().getNodes(nodeType, Node.State.failed).size() == 0;
    }

    /**
     * If the node is positively DOWN, and there is no "down" history record, we add it.
     * If the node is positively UP we remove any "down" history record.
     *
     * @return a list of all nodes which are positively currently in the down state
     */
    private List<Node> determineActiveNodeDownStatus() {
        List<Node> downNodes = new ArrayList<>();
        for (ApplicationInstance<ServiceMonitorStatus> application : serviceMonitor.queryStatusOfAllApplicationInstances().values()) {
            for (ServiceCluster<ServiceMonitorStatus> cluster : application.serviceClusters()) {
                for (ServiceInstance<ServiceMonitorStatus> service : cluster.serviceInstances()) {
                    Optional<Node> node = nodeRepository().getNode(service.hostName().s(), Node.State.active);
                    if ( ! node.isPresent()) continue;
                    if (service.serviceStatus().equals(ServiceMonitorStatus.DOWN))
                        downNodes.add(recordAsDown(node.get()));
                    else if (service.serviceStatus().equals(ServiceMonitorStatus.UP))
                        clearDownRecord(node.get());
                }
            }
        }
        return downNodes;
    }

    /**
     * Record a node as down if not already recorded and returns the node in the new state.
     * This assumes the node is found in the node
     * repo and that the node is allocated. If we get here otherwise something is truly odd.
     */
    private Node recordAsDown(Node node) {
        if (node.history().event(History.Event.Type.down).isPresent()) return node; // already recorded
        try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
            node = nodeRepository().getNode(node.hostname(), Node.State.active).get(); // re-read under lock
            return nodeRepository().write(node.downAt(clock.instant()));
        }
    }

    private void clearDownRecord(Node node) {
        if ( ! node.history().event(History.Event.Type.down).isPresent()) return;
        try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
            node = nodeRepository().getNode(node.hostname(), Node.State.active).get(); // re-read under lock
            nodeRepository().write(node.up());
        }
    }

    /**
     * Called when a node should be moved to the failed state: Do that if it seems safe,
     * which is when the node repo has available capacity to replace the node (and all its tenant nodes if host).
     * Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken.
     *
     * @return whether node was successfully failed
     */
    private boolean failActive(Node node, String reason) {
        Optional<Deployment> deployment =
            deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30));
        if ( ! deployment.isPresent()) return false;
        try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
            // Fail the node's child nodes (if any) first; active children are failed recursively
            boolean allTenantNodesFailedOutSuccessfully = true;
            for (Node failingTenantNode : nodeRepository().getChildNodes(node.hostname())) {
                if (failingTenantNode.state() == Node.State.active) {
                    allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reason);
                } else {
                    nodeRepository().fail(failingTenantNode.hostname(), reason);
                }
            }
            if (! allTenantNodesFailedOutSuccessfully) return false;
            node = nodeRepository().fail(node.hostname(), reason);
            try {
                deployment.get().activate();
                return true;
            } catch (RuntimeException e) {
                // Redeployment without the node failed: undo the failing and keep the node active
                nodeRepository().reactivate(node.hostname());
                log.log(Level.WARNING, "Attempted to fail " + node + " for " + node.allocation().get().owner() +
                                       ", but redeploying without the node failed", e);
                return false;
            }
        }
    }

    /** Returns true if node failing should be throttled */
    // NOTE(review): the javadoc above documents throttle(Node), which is elided from this excerpt

    @Override
    public String toString() { return "Node failer"; }

    /** Limits how many nodes may be failed within a time window. */
    public enum ThrottlePolicy {

        hosted(Duration.ofDays(1), 0.01, 2),
        disabled(Duration.ZERO, 0, 0);

        public final Duration throttleWindow;
        public final double fractionAllowedToFail;
        public final int minimumAllowedToFail;

        ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) {
            this.throttleWindow = throttleWindow;
            this.fractionAllowedToFail = fractionAllowedToFail;
            this.minimumAllowedToFail = minimumAllowedToFail;
        }

        @Override
        public String toString() {
            return String.format("Max %.0f%% or %d nodes can fail over a period of %s",
                                 fractionAllowedToFail*100, minimumAllowedToFail, throttleWindow);
        }

    }

}
/**
 * Maintenance job which moves nodes to the failed state when they are dead (not sending
 * config requests), have a detected hardware failure, or have been down longer than the
 * configured limit -- all subject to a {@link ThrottlePolicy}.
 */
class NodeFailer extends Maintainer {

    private static final Logger log = Logger.getLogger(NodeFailer.class.getName());
    // Expected interval between config requests from ready nodes
    private static final Duration nodeRequestInterval = Duration.ofMinutes(10);

    /** Provides information about the status of ready hosts */
    private final HostLivenessTracker hostLivenessTracker;

    /** Provides (more accurate) information about the status of active hosts */
    private final ServiceMonitor serviceMonitor;

    private final Deployer deployer;
    private final Duration downTimeLimit;
    private final Clock clock;
    private final Orchestrator orchestrator;
    private final Instant constructionTime;
    private final ThrottlePolicy throttlePolicy;

    public NodeFailer(Deployer deployer, HostLivenessTracker hostLivenessTracker,
                      ServiceMonitor serviceMonitor, NodeRepository nodeRepository,
                      Duration downTimeLimit, Clock clock, Orchestrator orchestrator,
                      ThrottlePolicy throttlePolicy) {
        // Run at least twice within downTimeLimit (capped at 5 minutes) so down nodes are caught in time
        super(nodeRepository, min(downTimeLimit.dividedBy(2), Duration.ofMinutes(5)));
        this.deployer = deployer;
        this.hostLivenessTracker = hostLivenessTracker;
        this.serviceMonitor = serviceMonitor;
        this.downTimeLimit = downTimeLimit;
        this.clock = clock;
        this.orchestrator = orchestrator;
        this.constructionTime = clock.instant();
        this.throttlePolicy = throttlePolicy;
    }

    @Override
    protected void maintain() {
        // 1. Record the latest config request seen from each ready node
        updateNodeLivenessEventsForReadyNodes();

        // 2. Fail ready nodes which are no longer requesting config
        //    (Docker containers and hosts are exempt from this check)
        for (Node node : readyNodesWhichAreDead()) {
            if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER || node.type() == NodeType.host) continue;
            if (!throttle(node)) nodeRepository().fail(node.hostname(), "Not receiving config requests from node");
        }

        // 3. Fail ready nodes with a recorded hardware failure
        for (Node node : readyNodesWithHardwareFailure())
            if (!throttle(node)) nodeRepository().fail(node.hostname(), "Node has hardware failure");

        // 4. Fail active nodes which have been down past the grace period,
        //    unless the owning application is suspended or failing is not allowed for the type
        for (Node node : determineActiveNodeDownStatus()) {
            Instant graceTimeEnd = node.history().event(History.Event.Type.down).get().at().plus(downTimeLimit);
            if (graceTimeEnd.isBefore(clock.instant()) && ! applicationSuspended(node) && failAllowedFor(node.type()))
                if (!throttle(node)) failActive(node, "Node has been down longer than " + downTimeLimit);
        }
    }

    /** Writes a "requested" history event for each ready node which has made a newer config request. */
    private void updateNodeLivenessEventsForReadyNodes() {
        try (Mutex lock = nodeRepository().lockUnallocated()) {
            for (Node node : nodeRepository().getNodes(Node.State.ready)) {
                Optional<Instant> lastLocalRequest = hostLivenessTracker.lastRequestFrom(node.hostname());
                if ( ! lastLocalRequest.isPresent()) continue;
                Optional<History.Event> recordedRequest = node.history().event(History.Event.Type.requested);
                if ( ! recordedRequest.isPresent() || recordedRequest.get().at().isBefore(lastLocalRequest.get())) {
                    History updatedHistory = node.history().with(new History.Event(History.Event.Type.requested, lastLocalRequest.get()));
                    nodeRepository().write(node.with(updatedHistory));
                }
            }
        }
    }

    /** Returns ready nodes which have not made a config request recently enough. */
    private List<Node> readyNodesWhichAreDead() {
        // Give the world 2 node request intervals after this failer was constructed before
        // we start flagging nodes, so a restart of this process does not cause false positives
        if (constructionTime.isAfter(clock.instant().minus(nodeRequestInterval).minus(nodeRequestInterval) ))
            return Collections.emptyList();
        Instant oldestAcceptableRequestTime = clock.instant().minus(downTimeLimit).minus(nodeRequestInterval);
        return nodeRepository().getNodes(Node.State.ready).stream()
                .filter(node -> wasMadeReadyBefore(oldestAcceptableRequestTime, node))
                .filter(node -> ! hasRecordedRequestAfter(oldestAcceptableRequestTime, node))
                .collect(Collectors.toList());
    }

    private boolean wasMadeReadyBefore(Instant instant, Node node) {
        Optional<History.Event> readiedEvent = node.history().event(History.Event.Type.readied);
        if ( ! readiedEvent.isPresent()) return false;
        return readiedEvent.get().at().isBefore(instant);
    }

    private boolean hasRecordedRequestAfter(Instant instant, Node node) {
        Optional<History.Event> lastRequest = node.history().event(History.Event.Type.requested);
        if ( ! lastRequest.isPresent()) return false;
        return lastRequest.get().at().isAfter(instant);
    }

    private List<Node> readyNodesWithHardwareFailure() {
        return nodeRepository().getNodes(Node.State.ready).stream()
                .filter(node -> node.status().hardwareFailure().isPresent())
                .collect(Collectors.toList());
    }

    private boolean applicationSuspended(Node node) {
        try {
            return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner())
                   == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN;
        } catch (ApplicationIdNotFoundException e) {
            // An application the orchestrator does not know about is treated as not suspended
            return false;
        }
    }

    /**
     * We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected
     * unless the node is replaced.
     * However, nodes of other types are not replaced (because all of the type are used by a single application),
     * so we only allow one to be in failed at any point in time to protect against runaway failing.
     */
    private boolean failAllowedFor(NodeType nodeType) {
        if (nodeType == NodeType.tenant || nodeType == NodeType.host) return true;
        return nodeRepository().getNodes(nodeType, Node.State.failed).size() == 0;
    }

    /**
     * If the node is positively DOWN, and there is no "down" history record, we add it.
     * If the node is positively UP we remove any "down" history record.
     *
     * @return a list of all nodes which are positively currently in the down state
     */
    private List<Node> determineActiveNodeDownStatus() {
        List<Node> downNodes = new ArrayList<>();
        for (ApplicationInstance<ServiceMonitorStatus> application : serviceMonitor.queryStatusOfAllApplicationInstances().values()) {
            for (ServiceCluster<ServiceMonitorStatus> cluster : application.serviceClusters()) {
                for (ServiceInstance<ServiceMonitorStatus> service : cluster.serviceInstances()) {
                    Optional<Node> node = nodeRepository().getNode(service.hostName().s(), Node.State.active);
                    if ( ! node.isPresent()) continue;
                    if (service.serviceStatus().equals(ServiceMonitorStatus.DOWN))
                        downNodes.add(recordAsDown(node.get()));
                    else if (service.serviceStatus().equals(ServiceMonitorStatus.UP))
                        clearDownRecord(node.get());
                }
            }
        }
        return downNodes;
    }

    /**
     * Record a node as down if not already recorded and returns the node in the new state.
     * This assumes the node is found in the node
     * repo and that the node is allocated. If we get here otherwise something is truly odd.
     */
    private Node recordAsDown(Node node) {
        if (node.history().event(History.Event.Type.down).isPresent()) return node; // already recorded
        try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
            node = nodeRepository().getNode(node.hostname(), Node.State.active).get(); // re-read under lock
            return nodeRepository().write(node.downAt(clock.instant()));
        }
    }

    private void clearDownRecord(Node node) {
        if ( ! node.history().event(History.Event.Type.down).isPresent()) return;
        try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
            node = nodeRepository().getNode(node.hostname(), Node.State.active).get(); // re-read under lock
            nodeRepository().write(node.up());
        }
    }

    /**
     * Called when a node should be moved to the failed state: Do that if it seems safe,
     * which is when the node repo has available capacity to replace the node (and all its tenant nodes if host).
     * Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken.
     *
     * @return whether node was successfully failed
     */
    private boolean failActive(Node node, String reason) {
        Optional<Deployment> deployment =
            deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30));
        if ( ! deployment.isPresent()) return false;
        try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
            // Fail the node's child nodes (if any) first; active children are failed recursively
            boolean allTenantNodesFailedOutSuccessfully = true;
            for (Node failingTenantNode : nodeRepository().getChildNodes(node.hostname())) {
                if (failingTenantNode.state() == Node.State.active) {
                    allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reason);
                } else {
                    nodeRepository().fail(failingTenantNode.hostname(), reason);
                }
            }
            if (! allTenantNodesFailedOutSuccessfully) return false;
            node = nodeRepository().fail(node.hostname(), reason);
            try {
                deployment.get().activate();
                return true;
            } catch (RuntimeException e) {
                // Redeployment without the node failed: undo the failing and keep the node active
                nodeRepository().reactivate(node.hostname());
                log.log(Level.WARNING, "Attempted to fail " + node + " for " + node.allocation().get().owner() +
                                       ", but redeploying without the node failed", e);
                return false;
            }
        }
    }

    /** Returns true if node failing should be throttled */
    // NOTE(review): the javadoc above documents throttle(Node), which is elided from this excerpt

    @Override
    public String toString() { return "Node failer"; }

    /** Limits how many nodes may be failed within a time window. */
    public enum ThrottlePolicy {

        hosted(Duration.ofDays(1), 0.01, 2),
        disabled(Duration.ZERO, 0, 0);

        public final Duration throttleWindow;
        public final double fractionAllowedToFail;
        public final int minimumAllowedToFail;

        ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) {
            this.throttleWindow = throttleWindow;
            this.fractionAllowedToFail = fractionAllowedToFail;
            this.minimumAllowedToFail = minimumAllowedToFail;
        }

        public String toHumanReadableString() {
            return String.format("Max %.0f%% or %d nodes can fail over a period of %s",
                                 fractionAllowedToFail*100, minimumAllowedToFail, throttleWindow);
        }

    }

}
Not too familiar with the surrounding code, but if there's any chance of wrapping NoNodeException more than once, this 'if' should recursively search for NoNodeException.
/**
 * Notifies the given completion waiter. A failure whose cause is
 * {@link KeeperException.NoNodeException} (the waiter's node has been deleted) is logged
 * and ignored; any other failure is rethrown.
 */
private void notifyCompletion(Curator.CompletionWaiter completionWaiter) {
    try {
        completionWaiter.notifyCompletion();
    } catch (RuntimeException e) {
        // Fix: instanceof is null-safe (getCause() may return null, which the original
        // getClass() comparison would NPE on) and also matches subclasses
        if (!(e.getCause() instanceof KeeperException.NoNodeException)) {
            throw e;
        } else {
            log.log(LogLevel.INFO, "Not able to notify completion for session: " + getSessionId() +
                                   ", node has been deleted");
        }
    }
}
if (e.getCause().getClass() != KeeperException.NoNodeException.class) {
/**
 * Notifies the given completion waiter. A failure whose cause is
 * {@link KeeperException.NoNodeException} (the waiter's node has been deleted) is logged
 * and ignored; any other failure is rethrown.
 */
private void notifyCompletion(Curator.CompletionWaiter completionWaiter) {
    try {
        completionWaiter.notifyCompletion();
    } catch (RuntimeException e) {
        // Fix: instanceof is null-safe (getCause() may return null, which the original
        // getClass() comparison would NPE on) and also matches subclasses
        if (!(e.getCause() instanceof KeeperException.NoNodeException)) {
            throw e;
        } else {
            log.log(LogLevel.INFO, "Not able to notify completion for session: " + getSessionId() +
                                   ", node has been deleted");
        }
    }
}
/**
 * A config server session backed by data in ZooKeeper (via a SessionZooKeeperClient).
 * Loads its application lazily and notifies completion waiters when the session has been
 * prepared, activated or uploaded.
 */
class RemoteSession extends Session {

    private static final Logger log = Logger.getLogger(RemoteSession.class.getName());

    // Lazily loaded application; volatile, so presumably read/written by multiple threads -- TODO confirm
    private volatile ApplicationSet applicationSet = null;
    private final ActivatedModelsBuilder applicationLoader;

    /**
     * Creates a session. This involves loading the application, validating it and distributing it.
     *
     * @param tenant The name of the tenant creating session
     * @param sessionId The session id for this session.
     * @param globalComponentRegistry a registry of global components
     * @param zooKeeperClient a SessionZooKeeperClient instance
     */
    public RemoteSession(TenantName tenant,
                         long sessionId,
                         GlobalComponentRegistry globalComponentRegistry,
                         SessionZooKeeperClient zooKeeperClient) {
        super(tenant, sessionId, zooKeeperClient);
        this.applicationLoader = new ActivatedModelsBuilder(tenant, sessionId, zooKeeperClient, globalComponentRegistry);
    }

    /** Loads this session's application and signals the prepare completion waiter. */
    public void loadPrepared() {
        Curator.CompletionWaiter waiter = zooKeeperClient.getPrepareWaiter();
        ensureApplicationLoaded();
        notifyCompletion(waiter);
    }

    /** Builds the application models from the application id and package stored in ZooKeeper. */
    private ApplicationSet loadApplication() {
        return ApplicationSet.fromList(applicationLoader.buildModels(zooKeeperClient.readApplicationId(getTenant()),
                                                                     zooKeeperClient.loadApplicationPackage()));
    }

    /** Returns this session's application, loading it first if not already loaded. */
    public ApplicationSet ensureApplicationLoaded() {
        if (applicationSet == null) {
            applicationSet = loadApplication();
        }
        return applicationSet;
    }

    public Session.Status getStatus() {
        return zooKeeperClient.readStatus();
    }

    /** Drops the cached application; it will be reloaded on next use. */
    public void deactivate() {
        applicationSet = null;
    }

    /** Reloads config from this session's application and signals the activate completion waiter. */
    public void makeActive(ReloadHandler reloadHandler) {
        Curator.CompletionWaiter waiter = zooKeeperClient.getActiveWaiter();
        log.log(LogLevel.DEBUG, logPre()+"Getting session from repo: " + getSessionId());
        ApplicationSet app = ensureApplicationLoaded();
        log.log(LogLevel.DEBUG, logPre() + "Reloading config for " + app);
        reloadHandler.reloadConfig(app);
        log.log(LogLevel.DEBUG, logPre() + "Notifying " + waiter);
        notifyCompletion(waiter);
        log.log(LogLevel.DEBUG, logPre() + "Session activated: " + app);
    }

    @Override
    public String logPre() {
        // Use the full application id in the log prefix when the application is loaded
        if (applicationSet != null) {
            return Tenants.logPre(applicationSet.getForVersionOrLatest(Optional.empty()).getId());
        }
        return Tenants.logPre(getTenant());
    }

    /** Signals the upload completion waiter for this session. */
    public void confirmUpload() {
        Curator.CompletionWaiter waiter = zooKeeperClient.getUploadWaiter();
        log.log(LogLevel.DEBUG, "Notifying upload waiter for session " + getSessionId());
        notifyCompletion(waiter);
        log.log(LogLevel.DEBUG, "Done notifying for session " + getSessionId());
    }

}
/**
 * A config server session backed by data in ZooKeeper (via a SessionZooKeeperClient).
 * Loads its application lazily and notifies completion waiters when the session has been
 * prepared, activated or uploaded.
 */
class RemoteSession extends Session {

    private static final Logger log = Logger.getLogger(RemoteSession.class.getName());

    // Lazily loaded application; volatile, so presumably read/written by multiple threads -- TODO confirm
    private volatile ApplicationSet applicationSet = null;
    private final ActivatedModelsBuilder applicationLoader;

    /**
     * Creates a session. This involves loading the application, validating it and distributing it.
     *
     * @param tenant The name of the tenant creating session
     * @param sessionId The session id for this session.
     * @param globalComponentRegistry a registry of global components
     * @param zooKeeperClient a SessionZooKeeperClient instance
     */
    public RemoteSession(TenantName tenant,
                         long sessionId,
                         GlobalComponentRegistry globalComponentRegistry,
                         SessionZooKeeperClient zooKeeperClient) {
        super(tenant, sessionId, zooKeeperClient);
        this.applicationLoader = new ActivatedModelsBuilder(tenant, sessionId, zooKeeperClient, globalComponentRegistry);
    }

    /** Loads this session's application and signals the prepare completion waiter. */
    public void loadPrepared() {
        Curator.CompletionWaiter waiter = zooKeeperClient.getPrepareWaiter();
        ensureApplicationLoaded();
        notifyCompletion(waiter);
    }

    /** Builds the application models from the application id and package stored in ZooKeeper. */
    private ApplicationSet loadApplication() {
        return ApplicationSet.fromList(applicationLoader.buildModels(zooKeeperClient.readApplicationId(getTenant()),
                                                                     zooKeeperClient.loadApplicationPackage()));
    }

    /** Returns this session's application, loading it first if not already loaded. */
    public ApplicationSet ensureApplicationLoaded() {
        if (applicationSet == null) {
            applicationSet = loadApplication();
        }
        return applicationSet;
    }

    public Session.Status getStatus() {
        return zooKeeperClient.readStatus();
    }

    /** Drops the cached application; it will be reloaded on next use. */
    public void deactivate() {
        applicationSet = null;
    }

    /** Reloads config from this session's application and signals the activate completion waiter. */
    public void makeActive(ReloadHandler reloadHandler) {
        Curator.CompletionWaiter waiter = zooKeeperClient.getActiveWaiter();
        log.log(LogLevel.DEBUG, logPre()+"Getting session from repo: " + getSessionId());
        ApplicationSet app = ensureApplicationLoaded();
        log.log(LogLevel.DEBUG, logPre() + "Reloading config for " + app);
        reloadHandler.reloadConfig(app);
        log.log(LogLevel.DEBUG, logPre() + "Notifying " + waiter);
        notifyCompletion(waiter);
        log.log(LogLevel.DEBUG, logPre() + "Session activated: " + app);
    }

    @Override
    public String logPre() {
        // Use the full application id in the log prefix when the application is loaded
        if (applicationSet != null) {
            return Tenants.logPre(applicationSet.getForVersionOrLatest(Optional.empty()).getId());
        }
        return Tenants.logPre(getTenant());
    }

    /** Signals the upload completion waiter for this session. */
    public void confirmUpload() {
        Curator.CompletionWaiter waiter = zooKeeperClient.getUploadWaiter();
        log.log(LogLevel.DEBUG, "Notifying upload waiter for session " + getSessionId());
        notifyCompletion(waiter);
        log.log(LogLevel.DEBUG, "Done notifying for session " + getSessionId());
    }

}
No, that's not possible in this case. It's a simple create call; if it fails, the thrown KeeperException is wrapped in a RuntimeException. I cannot see that it can be wrapped more than once.
/**
 * Notifies the given completion waiter. A failure whose cause is
 * {@link KeeperException.NoNodeException} (the waiter's node has been deleted) is logged
 * and ignored; any other failure is rethrown.
 */
private void notifyCompletion(Curator.CompletionWaiter completionWaiter) {
    try {
        completionWaiter.notifyCompletion();
    } catch (RuntimeException e) {
        // Fix: instanceof is null-safe (getCause() may return null, which the original
        // getClass() comparison would NPE on) and also matches subclasses
        if (!(e.getCause() instanceof KeeperException.NoNodeException)) {
            throw e;
        } else {
            log.log(LogLevel.INFO, "Not able to notify completion for session: " + getSessionId() +
                                   ", node has been deleted");
        }
    }
}
if (e.getCause().getClass() != KeeperException.NoNodeException.class) {
/**
 * Notifies the given completion waiter. A failure whose cause is
 * {@link KeeperException.NoNodeException} (the waiter's node has been deleted) is logged
 * and ignored; any other failure is rethrown.
 */
private void notifyCompletion(Curator.CompletionWaiter completionWaiter) {
    try {
        completionWaiter.notifyCompletion();
    } catch (RuntimeException e) {
        // Fix: instanceof is null-safe (getCause() may return null, which the original
        // getClass() comparison would NPE on) and also matches subclasses
        if (!(e.getCause() instanceof KeeperException.NoNodeException)) {
            throw e;
        } else {
            log.log(LogLevel.INFO, "Not able to notify completion for session: " + getSessionId() +
                                   ", node has been deleted");
        }
    }
}
/**
 * A config server session backed by data in ZooKeeper (via a SessionZooKeeperClient).
 * Loads its application lazily and notifies completion waiters when the session has been
 * prepared, activated or uploaded.
 */
class RemoteSession extends Session {

    private static final Logger log = Logger.getLogger(RemoteSession.class.getName());

    // Lazily loaded application; volatile, so presumably read/written by multiple threads -- TODO confirm
    private volatile ApplicationSet applicationSet = null;
    private final ActivatedModelsBuilder applicationLoader;

    /**
     * Creates a session. This involves loading the application, validating it and distributing it.
     *
     * @param tenant The name of the tenant creating session
     * @param sessionId The session id for this session.
     * @param globalComponentRegistry a registry of global components
     * @param zooKeeperClient a SessionZooKeeperClient instance
     */
    public RemoteSession(TenantName tenant,
                         long sessionId,
                         GlobalComponentRegistry globalComponentRegistry,
                         SessionZooKeeperClient zooKeeperClient) {
        super(tenant, sessionId, zooKeeperClient);
        this.applicationLoader = new ActivatedModelsBuilder(tenant, sessionId, zooKeeperClient, globalComponentRegistry);
    }

    /** Loads this session's application and signals the prepare completion waiter. */
    public void loadPrepared() {
        Curator.CompletionWaiter waiter = zooKeeperClient.getPrepareWaiter();
        ensureApplicationLoaded();
        notifyCompletion(waiter);
    }

    /** Builds the application models from the application id and package stored in ZooKeeper. */
    private ApplicationSet loadApplication() {
        return ApplicationSet.fromList(applicationLoader.buildModels(zooKeeperClient.readApplicationId(getTenant()),
                                                                     zooKeeperClient.loadApplicationPackage()));
    }

    /** Returns this session's application, loading it first if not already loaded. */
    public ApplicationSet ensureApplicationLoaded() {
        if (applicationSet == null) {
            applicationSet = loadApplication();
        }
        return applicationSet;
    }

    public Session.Status getStatus() {
        return zooKeeperClient.readStatus();
    }

    /** Drops the cached application; it will be reloaded on next use. */
    public void deactivate() {
        applicationSet = null;
    }

    /** Reloads config from this session's application and signals the activate completion waiter. */
    public void makeActive(ReloadHandler reloadHandler) {
        Curator.CompletionWaiter waiter = zooKeeperClient.getActiveWaiter();
        log.log(LogLevel.DEBUG, logPre()+"Getting session from repo: " + getSessionId());
        ApplicationSet app = ensureApplicationLoaded();
        log.log(LogLevel.DEBUG, logPre() + "Reloading config for " + app);
        reloadHandler.reloadConfig(app);
        log.log(LogLevel.DEBUG, logPre() + "Notifying " + waiter);
        notifyCompletion(waiter);
        log.log(LogLevel.DEBUG, logPre() + "Session activated: " + app);
    }

    @Override
    public String logPre() {
        // Use the full application id in the log prefix when the application is loaded
        if (applicationSet != null) {
            return Tenants.logPre(applicationSet.getForVersionOrLatest(Optional.empty()).getId());
        }
        return Tenants.logPre(getTenant());
    }

    /** Signals the upload completion waiter for this session. */
    public void confirmUpload() {
        Curator.CompletionWaiter waiter = zooKeeperClient.getUploadWaiter();
        log.log(LogLevel.DEBUG, "Notifying upload waiter for session " + getSessionId());
        notifyCompletion(waiter);
        log.log(LogLevel.DEBUG, "Done notifying for session " + getSessionId());
    }

}
/**
 * A config server session backed by data in ZooKeeper (via a SessionZooKeeperClient).
 * Loads its application lazily and notifies completion waiters when the session has been
 * prepared, activated or uploaded.
 */
class RemoteSession extends Session {

    private static final Logger log = Logger.getLogger(RemoteSession.class.getName());

    // Lazily loaded application; volatile, so presumably read/written by multiple threads -- TODO confirm
    private volatile ApplicationSet applicationSet = null;
    private final ActivatedModelsBuilder applicationLoader;

    /**
     * Creates a session. This involves loading the application, validating it and distributing it.
     *
     * @param tenant The name of the tenant creating session
     * @param sessionId The session id for this session.
     * @param globalComponentRegistry a registry of global components
     * @param zooKeeperClient a SessionZooKeeperClient instance
     */
    public RemoteSession(TenantName tenant,
                         long sessionId,
                         GlobalComponentRegistry globalComponentRegistry,
                         SessionZooKeeperClient zooKeeperClient) {
        super(tenant, sessionId, zooKeeperClient);
        this.applicationLoader = new ActivatedModelsBuilder(tenant, sessionId, zooKeeperClient, globalComponentRegistry);
    }

    /** Loads this session's application and signals the prepare completion waiter. */
    public void loadPrepared() {
        Curator.CompletionWaiter waiter = zooKeeperClient.getPrepareWaiter();
        ensureApplicationLoaded();
        notifyCompletion(waiter);
    }

    /** Builds the application models from the application id and package stored in ZooKeeper. */
    private ApplicationSet loadApplication() {
        return ApplicationSet.fromList(applicationLoader.buildModels(zooKeeperClient.readApplicationId(getTenant()),
                                                                     zooKeeperClient.loadApplicationPackage()));
    }

    /** Returns this session's application, loading it first if not already loaded. */
    public ApplicationSet ensureApplicationLoaded() {
        if (applicationSet == null) {
            applicationSet = loadApplication();
        }
        return applicationSet;
    }

    public Session.Status getStatus() {
        return zooKeeperClient.readStatus();
    }

    /** Drops the cached application; it will be reloaded on next use. */
    public void deactivate() {
        applicationSet = null;
    }

    /** Reloads config from this session's application and signals the activate completion waiter. */
    public void makeActive(ReloadHandler reloadHandler) {
        Curator.CompletionWaiter waiter = zooKeeperClient.getActiveWaiter();
        log.log(LogLevel.DEBUG, logPre()+"Getting session from repo: " + getSessionId());
        ApplicationSet app = ensureApplicationLoaded();
        log.log(LogLevel.DEBUG, logPre() + "Reloading config for " + app);
        reloadHandler.reloadConfig(app);
        log.log(LogLevel.DEBUG, logPre() + "Notifying " + waiter);
        notifyCompletion(waiter);
        log.log(LogLevel.DEBUG, logPre() + "Session activated: " + app);
    }

    @Override
    public String logPre() {
        // Use the full application id in the log prefix when the application is loaded
        if (applicationSet != null) {
            return Tenants.logPre(applicationSet.getForVersionOrLatest(Optional.empty()).getId());
        }
        return Tenants.logPre(getTenant());
    }

    /** Signals the upload completion waiter for this session. */
    public void confirmUpload() {
        Curator.CompletionWaiter waiter = zooKeeperClient.getUploadWaiter();
        log.log(LogLevel.DEBUG, "Notifying upload waiter for session " + getSessionId());
        notifyCompletion(waiter);
        log.log(LogLevel.DEBUG, "Done notifying for session " + getSessionId());
    }

}
I'll leave it then, as it's consistent with the way we store other versions.
/** Serializes the given allocation into the given slime object. */
private void toSlime(Allocation allocation, Cursor object) {
    // Identity of the owning application instance
    object.setString(tenantIdKey, allocation.owner().tenant().value());
    object.setString(applicationIdKey, allocation.owner().application().value());
    object.setString(instanceIdKey, allocation.owner().instance().value());
    // Cluster membership and restart bookkeeping
    object.setString(serviceIdKey, allocation.membership().stringValue());
    object.setLong(restartGenerationKey, allocation.restartGeneration().wanted());
    object.setLong(currentRestartGenerationKey, allocation.restartGeneration().current());
    object.setBool(removableKey, allocation.isRemovable());
    // The wanted Vespa version is optional and only serialized when present
    java.util.Optional<?> wantedVersion = allocation.membership().cluster().vespaVersion();
    if (wantedVersion.isPresent())
        object.setString(wantedVespaVersionKey, wantedVersion.get().toString());
}
.ifPresent(version -> object.setString(wantedVespaVersionKey, version.toString()));
/** Serializes the given allocation into the given slime object. */
private void toSlime(Allocation allocation, Cursor object) {
    // Identity of the owning application instance
    object.setString(tenantIdKey, allocation.owner().tenant().value());
    object.setString(applicationIdKey, allocation.owner().application().value());
    object.setString(instanceIdKey, allocation.owner().instance().value());
    // Cluster membership and restart bookkeeping
    object.setString(serviceIdKey, allocation.membership().stringValue());
    object.setLong(restartGenerationKey, allocation.restartGeneration().wanted());
    object.setLong(currentRestartGenerationKey, allocation.restartGeneration().current());
    object.setBool(removableKey, allocation.isRemovable());
    // The wanted Vespa version is optional and only serialized when present
    java.util.Optional<?> wantedVersion = allocation.membership().cluster().vespaVersion();
    if (wantedVersion.isPresent())
        object.setString(wantedVespaVersionKey, wantedVersion.get().toString());
}
class NodeSerializer { /** The configured node flavors */ private final NodeFlavors flavors; private static final String hostnameKey = "hostname"; private static final String ipAddressesKey = "ipAddresses"; private static final String openStackIdKey = "openStackId"; private static final String parentHostnameKey = "parentHostname"; private static final String historyKey = "history"; private static final String instanceKey = "instance"; private static final String rebootGenerationKey = "rebootGeneration"; private static final String currentRebootGenerationKey = "currentRebootGeneration"; private static final String vespaVersionKey = "vespaVersion"; private static final String hostedVersionKey = "hostedVersion"; private static final String stateVersionKey = "stateVersion"; private static final String failCountKey = "failCount"; private static final String hardwareFailureKey = "hardwareFailure"; private static final String nodeTypeKey = "type"; private static final String wantToRetireKey = "wantToRetire"; private static final String flavorKey = "flavor"; private static final String tenantIdKey = "tenantId"; private static final String applicationIdKey = "applicationId"; private static final String instanceIdKey = "instanceId"; private static final String serviceIdKey = "serviceId"; private static final String restartGenerationKey = "restartGeneration"; private static final String currentRestartGenerationKey = "currentRestartGeneration"; private static final String removableKey = "removable"; private static final String dockerImageKey = "dockerImage"; private static final String wantedVespaVersionKey = "wantedVespaVersion"; private static final String historyEventTypeKey = "type"; private static final String atKey = "at"; private static final String agentKey = "agent"; public NodeSerializer(NodeFlavors flavors) { this.flavors = flavors; } public byte[] toJson(Node node) { try { Slime slime = new Slime(); toSlime(node, slime.setObject()); return 
SlimeUtils.toJsonBytes(slime); } catch (IOException e) { throw new RuntimeException("Serialization of " + node + " to json failed", e); } } private void toSlime(Node node, Cursor object) { object.setString(hostnameKey, node.hostname()); toSlime(node.ipAddresses(), object.setArray(ipAddressesKey)); object.setString(openStackIdKey, node.openStackId()); node.parentHostname().ifPresent(hostname -> object.setString(parentHostnameKey, hostname)); object.setString(flavorKey, node.flavor().name()); object.setLong(rebootGenerationKey, node.status().reboot().wanted()); object.setLong(currentRebootGenerationKey, node.status().reboot().current()); node.status().vespaVersion().ifPresent(version -> object.setString(vespaVersionKey, version.toString())); node.status().hostedVersion().ifPresent(version -> object.setString(hostedVersionKey, version.toString())); node.status().stateVersion().ifPresent(version -> object.setString(stateVersionKey, version)); node.status().dockerImage().ifPresent(image -> object.setString(dockerImageKey, image)); object.setLong(failCountKey, node.status().failCount()); node.status().hardwareFailure().ifPresent(failure -> object.setString(hardwareFailureKey, toString(failure))); object.setBool(wantToRetireKey, node.status().wantToRetire()); node.allocation().ifPresent(allocation -> toSlime(allocation, object.setObject(instanceKey))); toSlime(node.history(), object.setArray(historyKey)); object.setString(nodeTypeKey, toString(node.type())); } private void toSlime(History history, Cursor array) { for (History.Event event : history.events()) toSlime(event, array.addObject()); } private void toSlime(History.Event event, Cursor object) { object.setString(historyEventTypeKey, toString(event.type())); object.setLong(atKey, event.at().toEpochMilli()); object.setString(agentKey, toString(event.agent())); } private void toSlime(Set<String> ipAddresses, Cursor array) { ipAddresses.forEach(array::addString); } public Node fromJson(Node.State state, byte[] data) { 
return nodeFromSlime(state, SlimeUtils.jsonToSlime(data).get()); } private Node nodeFromSlime(Node.State state, Inspector object) { return new Node(object.field(openStackIdKey).asString(), ipAddressesFromSlime(object), object.field(hostnameKey).asString(), parentHostnameFromSlime(object), flavorFromSlime(object), statusFromSlime(object), state, allocationFromSlime(object.field(instanceKey)), historyFromSlime(object.field(historyKey)), nodeTypeFromString(object.field(nodeTypeKey).asString())); } private Status statusFromSlime(Inspector object) { return new Status(generationFromSlime(object, rebootGenerationKey, currentRebootGenerationKey), softwareVersionFromSlime(object.field(vespaVersionKey)), softwareVersionFromSlime(object.field(hostedVersionKey)), optionalString(object.field(stateVersionKey)), optionalString(object.field(dockerImageKey)), (int)object.field(failCountKey).asLong(), hardwareFailureFromSlime(object.field(hardwareFailureKey)), object.field(wantToRetireKey).asBool()); } private Flavor flavorFromSlime(Inspector object) { return flavors.getFlavorOrThrow(object.field(flavorKey).asString()); } private Optional<Allocation> allocationFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(new Allocation(applicationIdFromSlime(object), clusterMembershipFromSlime(object), generationFromSlime(object, restartGenerationKey, currentRestartGenerationKey), object.field(removableKey).asBool())); } private ApplicationId applicationIdFromSlime(Inspector object) { return ApplicationId.from(TenantName.from(object.field(tenantIdKey).asString()), ApplicationName.from(object.field(applicationIdKey).asString()), InstanceName.from(object.field(instanceIdKey).asString())); } private History historyFromSlime(Inspector array) { List<History.Event> events = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> { History.Event event = eventFromSlime(item); if (event != null) events.add(event); }); return new History(events); } private History.Event eventFromSlime(Inspector object) { History.Event.Type type = eventTypeFromString(object.field(historyEventTypeKey).asString()); if (type == null) return null; Instant at = Instant.ofEpochMilli(object.field(atKey).asLong()); Agent agent = eventAgentFromSlime(object.field(agentKey)); return new History.Event(type, agent, at); } private Generation generationFromSlime(Inspector object, String wantedField, String currentField) { Inspector current = object.field(currentField); return new Generation(object.field(wantedField).asLong(), current.asLong()); } private ClusterMembership clusterMembershipFromSlime(Inspector object) { Optional<Version> vespaVersion; if (object.field(dockerImageKey).valid()) { vespaVersion = optionalString(object.field(dockerImageKey)) .map(DockerImage::new) .map(DockerImage::tagAsVersion); } else { vespaVersion = softwareVersionFromSlime(object.field(wantedVespaVersionKey)); } return ClusterMembership.fromVersion(object.field(serviceIdKey).asString(), vespaVersion); } private Optional<Version> softwareVersionFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(Version.fromString(object.asString())); } private Optional<String> parentHostnameFromSlime(Inspector object) { if (object.field(parentHostnameKey).valid()) return Optional.of(object.field(parentHostnameKey).asString()); else return Optional.empty(); } private Set<String> ipAddressesFromSlime(Inspector object) { ImmutableSet.Builder<String> ipAddresses = ImmutableSet.builder(); object.field(ipAddressesKey).traverse((ArrayTraverser) (i, item) -> ipAddresses.add(item.asString())); return ipAddresses.build(); } private Optional<Status.HardwareFailureType> hardwareFailureFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); return Optional.of(hardwareFailureFromString(object.asString())); } /** Returns the event type, or null if this event type should be ignored */ private History.Event.Type eventTypeFromString(String eventTypeString) { switch (eventTypeString) { case "readied" : return History.Event.Type.readied; case "reserved" : return History.Event.Type.reserved; case "activated" : return History.Event.Type.activated; case "retired" : return History.Event.Type.retired; case "deactivated" : return History.Event.Type.deactivated; case "failed" : return History.Event.Type.failed; case "deallocated" : return History.Event.Type.deallocated; case "down" : return History.Event.Type.down; case "requested" : return History.Event.Type.requested; case "rebooted" : return History.Event.Type.rebooted; } throw new IllegalArgumentException("Unknown node event type '" + eventTypeString + "'"); } private String toString(History.Event.Type nodeEventType) { switch (nodeEventType) { case readied : return "readied"; case reserved : return "reserved"; case activated : return "activated"; case retired : return "retired"; case deactivated : return "deactivated"; case failed : return "failed"; case deallocated : return "deallocated"; case down : return "down"; case requested: return "requested"; case 
rebooted: return "rebooted"; } throw new IllegalArgumentException("Serialized form of '" + nodeEventType + "' not defined"); } private Agent eventAgentFromSlime(Inspector eventAgentField) { if ( ! eventAgentField.valid()) return Agent.system; switch (eventAgentField.asString()) { case "application" : return Agent.application; case "system" : return Agent.system; case "operator" : return Agent.operator; } throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'"); } private String toString(Agent agent) { switch (agent) { case application : return "application"; case system : return "system"; case operator : return "operator"; } throw new IllegalArgumentException("Serialized form of '" + agent + "' not defined"); } private NodeType nodeTypeFromString(String typeString) { switch (typeString) { case "tenant" : return NodeType.tenant; case "host" : return NodeType.host; case "proxy" : return NodeType.proxy; default : throw new IllegalArgumentException("Unknown node type '" + typeString + "'"); } } private String toString(NodeType type) { switch (type) { case tenant: return "tenant"; case host: return "host"; case proxy: return "proxy"; } throw new IllegalArgumentException("Serialized form of '" + type + "' not defined"); } private Status.HardwareFailureType hardwareFailureFromString(String hardwareFailureString) { switch (hardwareFailureString) { case "memory_mcelog" : return Status.HardwareFailureType.memory_mcelog; case "disk_smart" : return Status.HardwareFailureType.disk_smart; case "disk_kernel" : return Status.HardwareFailureType.disk_kernel; case "unknown" : return Status.HardwareFailureType.unknown; default : throw new IllegalArgumentException("Unknown hardware failure '" + hardwareFailureString + "'"); } } private String toString(Status.HardwareFailureType type) { switch (type) { case memory_mcelog: return "memory_mcelog"; case disk_smart: return "disk_smart"; case disk_kernel: return "disk_kernel"; case unknown: return 
"unknown"; default : throw new IllegalArgumentException("Serialized form of '" + type + " not defined"); } } }
/**
 * Serializes {@link Node} instances to and from the JSON form stored in the node repository,
 * using Slime as the intermediate representation. Readers tolerate missing fields, so new
 * fields can be added without migrating stored data.
 */
class NodeSerializer {

    /** The configured node flavors */
    private final NodeFlavors flavors;

    // Serialized field names: top-level node fields, then allocation fields, then history fields
    private static final String hostnameKey = "hostname";
    private static final String ipAddressesKey = "ipAddresses";
    private static final String openStackIdKey = "openStackId";
    private static final String parentHostnameKey = "parentHostname";
    private static final String historyKey = "history";
    private static final String instanceKey = "instance";
    private static final String rebootGenerationKey = "rebootGeneration";
    private static final String currentRebootGenerationKey = "currentRebootGeneration";
    private static final String vespaVersionKey = "vespaVersion";
    private static final String hostedVersionKey = "hostedVersion";
    private static final String stateVersionKey = "stateVersion";
    private static final String failCountKey = "failCount";
    private static final String hardwareFailureKey = "hardwareFailure";
    private static final String nodeTypeKey = "type";
    private static final String wantToRetireKey = "wantToRetire";
    private static final String flavorKey = "flavor";
    private static final String tenantIdKey = "tenantId";
    private static final String applicationIdKey = "applicationId";
    private static final String instanceIdKey = "instanceId";
    private static final String serviceIdKey = "serviceId";
    private static final String restartGenerationKey = "restartGeneration";
    private static final String currentRestartGenerationKey = "currentRestartGeneration";
    private static final String removableKey = "removable";
    private static final String dockerImageKey = "dockerImage";
    private static final String wantedVespaVersionKey = "wantedVespaVersion";
    private static final String historyEventTypeKey = "type";
    private static final String atKey = "at";
    private static final String agentKey = "agent";

    public NodeSerializer(NodeFlavors flavors) {
        this.flavors = flavors;
    }

    /** Serializes the given node to JSON bytes; wraps IOException in RuntimeException. */
    public byte[] toJson(Node node) {
        try {
            Slime slime = new Slime();
            toSlime(node, slime.setObject());
            return SlimeUtils.toJsonBytes(slime);
        } catch (IOException e) {
            throw new RuntimeException("Serialization of " + node + " to json failed", e);
        }
    }

    /** Writes all top-level node fields; optional fields are omitted when absent. */
    private void toSlime(Node node, Cursor object) {
        object.setString(hostnameKey, node.hostname());
        toSlime(node.ipAddresses(), object.setArray(ipAddressesKey));
        object.setString(openStackIdKey, node.openStackId());
        node.parentHostname().ifPresent(hostname -> object.setString(parentHostnameKey, hostname));
        object.setString(flavorKey, node.flavor().name());
        object.setLong(rebootGenerationKey, node.status().reboot().wanted());
        object.setLong(currentRebootGenerationKey, node.status().reboot().current());
        node.status().vespaVersion().ifPresent(version -> object.setString(vespaVersionKey, version.toString()));
        node.status().hostedVersion().ifPresent(version -> object.setString(hostedVersionKey, version.toString()));
        node.status().stateVersion().ifPresent(version -> object.setString(stateVersionKey, version));
        node.status().dockerImage().ifPresent(image -> object.setString(dockerImageKey, image));
        object.setLong(failCountKey, node.status().failCount());
        node.status().hardwareFailure().ifPresent(failure -> object.setString(hardwareFailureKey, toString(failure)));
        object.setBool(wantToRetireKey, node.status().wantToRetire());
        node.allocation().ifPresent(allocation -> toSlime(allocation, object.setObject(instanceKey)));
        toSlime(node.history(), object.setArray(historyKey));
        object.setString(nodeTypeKey, toString(node.type()));
    }

    /** Writes each history event as one object in the given array. */
    private void toSlime(History history, Cursor array) {
        for (History.Event event : history.events())
            toSlime(event, array.addObject());
    }

    /** Writes a single history event: type, epoch-millis timestamp and agent. */
    private void toSlime(History.Event event, Cursor object) {
        object.setString(historyEventTypeKey, toString(event.type()));
        object.setLong(atKey, event.at().toEpochMilli());
        object.setString(agentKey, toString(event.agent()));
    }

    /** Writes the IP addresses as a flat string array. */
    private void toSlime(Set<String> ipAddresses, Cursor array) {
        ipAddresses.forEach(array::addString);
    }

    /** Deserializes a node; the node state is supplied by the caller, not stored. */
    public Node fromJson(Node.State state, byte[] data) {
        return nodeFromSlime(state, SlimeUtils.jsonToSlime(data).get());
    }

    private Node nodeFromSlime(Node.State state, Inspector object) {
        return new Node(object.field(openStackIdKey).asString(),
                        ipAddressesFromSlime(object),
                        object.field(hostnameKey).asString(),
                        parentHostnameFromSlime(object),
                        flavorFromSlime(object),
                        statusFromSlime(object),
                        state,
                        allocationFromSlime(object.field(instanceKey)),
                        historyFromSlime(object.field(historyKey)),
                        nodeTypeFromString(object.field(nodeTypeKey).asString()));
    }

    private Status statusFromSlime(Inspector object) {
        return new Status(generationFromSlime(object, rebootGenerationKey, currentRebootGenerationKey),
                          softwareVersionFromSlime(object.field(vespaVersionKey)),
                          softwareVersionFromSlime(object.field(hostedVersionKey)),
                          optionalString(object.field(stateVersionKey)),
                          optionalString(object.field(dockerImageKey)),
                          (int)object.field(failCountKey).asLong(),
                          hardwareFailureFromSlime(object.field(hardwareFailureKey)),
                          object.field(wantToRetireKey).asBool());
    }

    /** Throws if the serialized flavor is no longer configured. */
    private Flavor flavorFromSlime(Inspector object) {
        return flavors.getFlavorOrThrow(object.field(flavorKey).asString());
    }

    /** Returns the allocation, or empty if the node has no "instance" object. */
    private Optional<Allocation> allocationFromSlime(Inspector object) {
        if ( ! object.valid()) return Optional.empty();
        return Optional.of(new Allocation(applicationIdFromSlime(object),
                                          clusterMembershipFromSlime(object),
                                          generationFromSlime(object, restartGenerationKey, currentRestartGenerationKey),
                                          object.field(removableKey).asBool()));
    }

    private ApplicationId applicationIdFromSlime(Inspector object) {
        return ApplicationId.from(TenantName.from(object.field(tenantIdKey).asString()),
                                  ApplicationName.from(object.field(applicationIdKey).asString()),
                                  InstanceName.from(object.field(instanceIdKey).asString()));
    }

    private History historyFromSlime(Inspector array) {
        List<History.Event> events = new ArrayList<>();
        array.traverse((ArrayTraverser) (int i, Inspector item) -> {
            History.Event event = eventFromSlime(item);
            if (event != null)
                events.add(event);
        });
        return new History(events);
    }

    private History.Event eventFromSlime(Inspector object) {
        History.Event.Type type = eventTypeFromString(object.field(historyEventTypeKey).asString());
        // Defensive guard: eventTypeFromString currently throws rather than returning
        // null for unknown types, so this branch is effectively dead — kept for safety.
        if (type == null) return null;
        Instant at = Instant.ofEpochMilli(object.field(atKey).asLong());
        Agent agent = eventAgentFromSlime(object.field(agentKey));
        return new History.Event(type, agent, at);
    }

    /** Reads a wanted/current generation pair; a missing current field reads as 0. */
    private Generation generationFromSlime(Inspector object, String wantedField, String currentField) {
        Inspector current = object.field(currentField);
        return new Generation(object.field(wantedField).asLong(), current.asLong());
    }

    /**
     * Reconstructs the cluster membership. The wanted Vespa version is taken from the
     * Docker image tag when one is recorded, otherwise from the wantedVespaVersion field.
     */
    private ClusterMembership clusterMembershipFromSlime(Inspector object) {
        Optional<Version> vespaVersion;
        if (object.field(dockerImageKey).valid()) {
            vespaVersion = optionalString(object.field(dockerImageKey))
                    .map(DockerImage::new)
                    .map(DockerImage::tagAsVersion);
        } else {
            vespaVersion = softwareVersionFromSlime(object.field(wantedVespaVersionKey));
        }
        return ClusterMembership.fromVersion(object.field(serviceIdKey).asString(), vespaVersion);
    }

    /** Returns the version in the given field, or empty if the field is missing. */
    private Optional<Version> softwareVersionFromSlime(Inspector object) {
        if ( ! object.valid()) return Optional.empty();
        return Optional.of(Version.fromString(object.asString()));
    }

    private Optional<String> parentHostnameFromSlime(Inspector object) {
        if (object.field(parentHostnameKey).valid())
            return Optional.of(object.field(parentHostnameKey).asString());
        else
            return Optional.empty();
    }

    private Set<String> ipAddressesFromSlime(Inspector object) {
        ImmutableSet.Builder<String> ipAddresses = ImmutableSet.builder();
        object.field(ipAddressesKey).traverse((ArrayTraverser) (i, item) -> ipAddresses.add(item.asString()));
        return ipAddresses.build();
    }

    private Optional<Status.HardwareFailureType> hardwareFailureFromSlime(Inspector object) {
        if ( ! object.valid()) return Optional.empty();
        return Optional.of(hardwareFailureFromString(object.asString()));
    }

    /**
     * Returns the event type for the given serialized string.
     * NOTE(review): this throws for unknown strings, yet the caller still checks for
     * null — the original doc claimed null was returned for ignored types; confirm
     * which contract is intended.
     */
    private History.Event.Type eventTypeFromString(String eventTypeString) {
        switch (eventTypeString) {
            case "readied" : return History.Event.Type.readied;
            case "reserved" : return History.Event.Type.reserved;
            case "activated" : return History.Event.Type.activated;
            case "retired" : return History.Event.Type.retired;
            case "deactivated" : return History.Event.Type.deactivated;
            case "failed" : return History.Event.Type.failed;
            case "deallocated" : return History.Event.Type.deallocated;
            case "down" : return History.Event.Type.down;
            case "requested" : return History.Event.Type.requested;
            case "rebooted" : return History.Event.Type.rebooted;
        }
        throw new IllegalArgumentException("Unknown node event type '" + eventTypeString + "'");
    }

    private String toString(History.Event.Type nodeEventType) {
        switch (nodeEventType) {
            case readied : return "readied";
            case reserved : return "reserved";
            case activated : return "activated";
            case retired : return "retired";
            case deactivated : return "deactivated";
            case failed : return "failed";
            case deallocated : return "deallocated";
            case down : return "down";
            case requested: return "requested";
            case rebooted: return "rebooted";
        }
        throw new IllegalArgumentException("Serialized form of '" + nodeEventType + "' not defined");
    }

    /** Returns the agent of an event; a missing field defaults to the system agent. */
    private Agent eventAgentFromSlime(Inspector eventAgentField) {
        if ( ! eventAgentField.valid()) return Agent.system;
        switch (eventAgentField.asString()) {
            case "application" : return Agent.application;
            case "system" : return Agent.system;
            case "operator" : return Agent.operator;
        }
        throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'");
    }

    private String toString(Agent agent) {
        switch (agent) {
            case application : return "application";
            case system : return "system";
            case operator : return "operator";
        }
        throw new IllegalArgumentException("Serialized form of '" + agent + "' not defined");
    }

    private NodeType nodeTypeFromString(String typeString) {
        switch (typeString) {
            case "tenant" : return NodeType.tenant;
            case "host" : return NodeType.host;
            case "proxy" : return NodeType.proxy;
            default : throw new IllegalArgumentException("Unknown node type '" + typeString + "'");
        }
    }

    private String toString(NodeType type) {
        switch (type) {
            case tenant: return "tenant";
            case host: return "host";
            case proxy: return "proxy";
        }
        throw new IllegalArgumentException("Serialized form of '" + type + "' not defined");
    }

    private Status.HardwareFailureType hardwareFailureFromString(String hardwareFailureString) {
        switch (hardwareFailureString) {
            case "memory_mcelog" : return Status.HardwareFailureType.memory_mcelog;
            case "disk_smart" : return Status.HardwareFailureType.disk_smart;
            case "disk_kernel" : return Status.HardwareFailureType.disk_kernel;
            case "unknown" : return Status.HardwareFailureType.unknown;
            default : throw new IllegalArgumentException("Unknown hardware failure '" + hardwareFailureString + "'");
        }
    }

    private String toString(Status.HardwareFailureType type) {
        switch (type) {
            case memory_mcelog: return "memory_mcelog";
            case disk_smart: return "disk_smart";
            case disk_kernel: return "disk_kernel";
            case unknown: return "unknown";
            // NOTE(review): message is missing the closing quote after the type name
            default : throw new IllegalArgumentException("Serialized form of '" + type + " not defined");
        }
    }

}