comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
nit: Create an `HttpHeaderName` for `x-ms-encryption-context` and use the `HttpHeaderName` API instead, it performs much much better.
static String getEncryptionContext(Response<?> r) { if (r == null) { return null; } return r.getHeaders().getValue("x-ms-encryption-context"); }
return r.getHeaders().getValue("x-ms-encryption-context");
static String getEncryptionContext(Response<?> r) { if (r == null) { return null; } return r.getHeaders().getValue(X_MS_ENCRYPTION_CONTEXT); }
class Transforms { private static final String SERIALIZATION_MESSAGE = String.format("'serialization' must be one of %s, %s, %s or " + "%s.", FileQueryJsonSerialization.class.getSimpleName(), FileQueryDelimitedSerialization.class.getSimpleName(), FileQueryArrowSerialization.class.getSimpleName(), FileQueryParquetSerialization.class.getSimpleName()); private static final long EPOCH_CONVERSION; static { GregorianCalendar unixEpoch = new GregorianCalendar(); unixEpoch.clear(); unixEpoch.set(1970, Calendar.JANUARY, 1, 0, 0, 0); GregorianCalendar windowsEpoch = new GregorianCalendar(); windowsEpoch.clear(); windowsEpoch.set(1601, Calendar.JANUARY, 1, 0, 0, 0); EPOCH_CONVERSION = unixEpoch.getTimeInMillis() - windowsEpoch.getTimeInMillis(); } static com.azure.storage.blob.models.PublicAccessType toBlobPublicAccessType(PublicAccessType fileSystemPublicAccessType) { if (fileSystemPublicAccessType == null) { return null; } return com.azure.storage.blob.models.PublicAccessType.fromString(fileSystemPublicAccessType.toString()); } private static LeaseDurationType toDataLakeLeaseDurationType(com.azure.storage.blob.models.LeaseDurationType blobLeaseDurationType) { if (blobLeaseDurationType == null) { return null; } return LeaseDurationType.fromString(blobLeaseDurationType.toString()); } private static LeaseStateType toDataLakeLeaseStateType(com.azure.storage.blob.models.LeaseStateType blobLeaseStateType) { if (blobLeaseStateType == null) { return null; } return LeaseStateType.fromString(blobLeaseStateType.toString()); } private static LeaseStatusType toDataLakeLeaseStatusType(com.azure.storage.blob.models.LeaseStatusType blobLeaseStatusType) { if (blobLeaseStatusType == null) { return null; } return LeaseStatusType.fromString(blobLeaseStatusType.toString()); } private static PublicAccessType toDataLakePublicAccessType(com.azure.storage.blob.models.PublicAccessType blobPublicAccessType) { if (blobPublicAccessType == null) { return null; } return 
PublicAccessType.fromString(blobPublicAccessType.toString()); } private static CopyStatusType toDataLakeCopyStatusType( com.azure.storage.blob.models.CopyStatusType blobCopyStatus) { if (blobCopyStatus == null) { return null; } return CopyStatusType.fromString(blobCopyStatus.toString()); } private static ArchiveStatus toDataLakeArchiveStatus( com.azure.storage.blob.models.ArchiveStatus blobArchiveStatus) { if (blobArchiveStatus == null) { return null; } return ArchiveStatus.fromString(blobArchiveStatus.toString()); } private static AccessTier toDataLakeAccessTier(com.azure.storage.blob.models.AccessTier blobAccessTier) { if (blobAccessTier == null) { return null; } return AccessTier.fromString(blobAccessTier.toString()); } static FileSystemProperties toFileSystemProperties(BlobContainerProperties blobContainerProperties) { if (blobContainerProperties == null) { return null; } FileSystemProperties fileSystemProperties = new FileSystemProperties(blobContainerProperties.getMetadata(), blobContainerProperties.getETag(), blobContainerProperties.getLastModified(), Transforms.toDataLakeLeaseDurationType(blobContainerProperties.getLeaseDuration()), Transforms.toDataLakeLeaseStateType(blobContainerProperties.getLeaseState()), Transforms.toDataLakeLeaseStatusType(blobContainerProperties.getLeaseStatus()), Transforms.toDataLakePublicAccessType(blobContainerProperties.getBlobPublicAccess()), blobContainerProperties.hasImmutabilityPolicy(), blobContainerProperties.hasLegalHold()); return AccessorUtility.getFileSystemPropertiesAccessor() .setFileSystemProperties(fileSystemProperties, blobContainerProperties.getDefaultEncryptionScope(), blobContainerProperties.isEncryptionScopeOverridePrevented()); } private static BlobContainerListDetails toBlobContainerListDetails(FileSystemListDetails fileSystemListDetails) { if (fileSystemListDetails == null) { return null; } return new BlobContainerListDetails() .setRetrieveMetadata(fileSystemListDetails.getRetrieveMetadata()) 
.setRetrieveDeleted(fileSystemListDetails.getRetrieveDeleted()) .setRetrieveSystemContainers(fileSystemListDetails.getRetrieveSystemFileSystems()); } static ListBlobContainersOptions toListBlobContainersOptions(ListFileSystemsOptions listFileSystemsOptions) { if (listFileSystemsOptions == null) { return null; } return new ListBlobContainersOptions() .setDetails(toBlobContainerListDetails(listFileSystemsOptions.getDetails())) .setMaxResultsPerPage(listFileSystemsOptions.getMaxResultsPerPage()) .setPrefix(listFileSystemsOptions.getPrefix()); } static UserDelegationKey toDataLakeUserDelegationKey(com.azure.storage.blob.models.UserDelegationKey blobUserDelegationKey) { if (blobUserDelegationKey == null) { return null; } return new UserDelegationKey() .setSignedExpiry(blobUserDelegationKey.getSignedExpiry()) .setSignedObjectId(blobUserDelegationKey.getSignedObjectId()) .setSignedTenantId(blobUserDelegationKey.getSignedTenantId()) .setSignedService(blobUserDelegationKey.getSignedService()) .setSignedStart(blobUserDelegationKey.getSignedStart()) .setSignedVersion(blobUserDelegationKey.getSignedVersion()) .setValue(blobUserDelegationKey.getValue()); } static BlobHttpHeaders toBlobHttpHeaders(PathHttpHeaders pathHTTPHeaders) { if (pathHTTPHeaders == null) { return null; } return new BlobHttpHeaders() .setCacheControl(pathHTTPHeaders.getCacheControl()) .setContentDisposition(pathHTTPHeaders.getContentDisposition()) .setContentEncoding(pathHTTPHeaders.getContentEncoding()) .setContentLanguage(pathHTTPHeaders.getContentLanguage()) .setContentType(pathHTTPHeaders.getContentType()) .setContentMd5(pathHTTPHeaders.getContentMd5()); } static BlobInputStreamOptions toBlobInputStreamOptions(DataLakeFileInputStreamOptions options) { if (options == null) { return null; } return new BlobInputStreamOptions() .setBlockSize(options.getBlockSize()) .setRange(toBlobRange(options.getRange())) .setRequestConditions(toBlobRequestConditions(options.getRequestConditions())) 
.setConsistentReadControl(toBlobConsistentReadControl(options.getConsistentReadControl())); } static com.azure.storage.blob.models.ConsistentReadControl toBlobConsistentReadControl( com.azure.storage.file.datalake.models.ConsistentReadControl datalakeConsistentReadControl) { if (datalakeConsistentReadControl == null) { return null; } switch (datalakeConsistentReadControl) { case NONE: return ConsistentReadControl.NONE; case ETAG: return ConsistentReadControl.ETAG; default: throw new IllegalArgumentException("Could not convert ConsistentReadControl"); } } static BlobRange toBlobRange(FileRange fileRange) { if (fileRange == null) { return null; } return new BlobRange(fileRange.getOffset(), fileRange.getCount()); } static com.azure.storage.blob.models.DownloadRetryOptions toBlobDownloadRetryOptions( DownloadRetryOptions dataLakeOptions) { if (dataLakeOptions == null) { return null; } return new com.azure.storage.blob.models.DownloadRetryOptions() .setMaxRetryRequests(dataLakeOptions.getMaxRetryRequests()); } static PathProperties toPathProperties(BlobProperties properties, String encryptionContext) { if (properties == null) { return null; } else { PathProperties pathProperties = new PathProperties(properties.getCreationTime(), properties.getLastModified(), properties.getETag(), properties.getBlobSize(), properties.getContentType(), properties.getContentMd5(), properties.getContentEncoding(), properties.getContentDisposition(), properties.getContentLanguage(), properties.getCacheControl(), Transforms.toDataLakeLeaseStatusType(properties.getLeaseStatus()), Transforms.toDataLakeLeaseStateType(properties.getLeaseState()), Transforms.toDataLakeLeaseDurationType(properties.getLeaseDuration()), properties.getCopyId(), Transforms.toDataLakeCopyStatusType(properties.getCopyStatus()), properties.getCopySource(), properties.getCopyProgress(), properties.getCopyCompletionTime(), properties.getCopyStatusDescription(), properties.isServerEncrypted(), properties.isIncrementalCopy(), 
Transforms.toDataLakeAccessTier(properties.getAccessTier()), Transforms.toDataLakeArchiveStatus(properties.getArchiveStatus()), properties.getEncryptionKeySha256(), properties.getAccessTierChangeTime(), properties.getMetadata(), properties.getExpiresOn()); return AccessorUtility.getPathPropertiesAccessor().setPathProperties(pathProperties, properties.getEncryptionScope(), encryptionContext); } } static FileSystemItem toFileSystemItem(BlobContainerItem blobContainerItem) { if (blobContainerItem == null) { return null; } return new FileSystemItem() .setName(blobContainerItem.getName()) .setDeleted(blobContainerItem.isDeleted()) .setVersion(blobContainerItem.getVersion()) .setMetadata(blobContainerItem.getMetadata()) .setProperties(Transforms.toFileSystemItemProperties(blobContainerItem.getProperties())); } private static FileSystemItemProperties toFileSystemItemProperties( BlobContainerItemProperties blobContainerItemProperties) { if (blobContainerItemProperties == null) { return null; } return new FileSystemItemProperties() .setETag(blobContainerItemProperties.getETag()) .setLastModified(blobContainerItemProperties.getLastModified()) .setLeaseStatus(toDataLakeLeaseStatusType(blobContainerItemProperties.getLeaseStatus())) .setLeaseState(toDataLakeLeaseStateType(blobContainerItemProperties.getLeaseState())) .setLeaseDuration(toDataLakeLeaseDurationType(blobContainerItemProperties.getLeaseDuration())) .setPublicAccess(toDataLakePublicAccessType(blobContainerItemProperties.getPublicAccess())) .setHasLegalHold(blobContainerItemProperties.isHasLegalHold()) .setHasImmutabilityPolicy(blobContainerItemProperties.isHasImmutabilityPolicy()) .setEncryptionScope(blobContainerItemProperties.getDefaultEncryptionScope()) .setEncryptionScopeOverridePrevented(blobContainerItemProperties.isEncryptionScopeOverridePrevented()); } static PathItem toPathItem(Path path) { if (path == null) { return null; } PathItem pathItem = new PathItem(path.getETag(), 
parseDateOrNull(path.getLastModified()), path.getContentLength() == null ? 0 : path.getContentLength(), path.getGroup(), path.isDirectory() != null && path.isDirectory(), path.getName(), path.getOwner(), path.getPermissions(), path.getCreationTime() == null ? null : fromWindowsFileTimeOrNull(Long.parseLong(path.getCreationTime())), path.getExpiryTime() == null ? null : fromWindowsFileTimeOrNull(Long.parseLong(path.getExpiryTime()))); return AccessorUtility.getPathItemAccessor().setPathItemProperties(pathItem, path.getEncryptionScope(), path.getEncryptionContext()); } private static OffsetDateTime parseDateOrNull(String date) { return date == null ? null : OffsetDateTime.parse(date, DateTimeFormatter.RFC_1123_DATE_TIME); } private static OffsetDateTime fromWindowsFileTimeOrNull(long fileTime) { if (fileTime == 0) { return null; } long fileTimeMs = fileTime / 10000; long fileTimeUnixEpoch = fileTimeMs - EPOCH_CONVERSION; return Instant.ofEpochMilli(fileTimeUnixEpoch).atOffset(ZoneOffset.UTC); } static BlobRequestConditions toBlobRequestConditions(DataLakeRequestConditions requestConditions) { if (requestConditions == null) { return null; } return new BlobRequestConditions() .setLeaseId(requestConditions.getLeaseId()) .setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince()) .setIfNoneMatch(requestConditions.getIfNoneMatch()) .setIfMatch(requestConditions.getIfMatch()) .setIfModifiedSince(requestConditions.getIfModifiedSince()); } static FileReadResponse toFileReadResponse(BlobDownloadResponse r) { if (r == null) { return null; } return new FileReadResponse(Transforms.toFileReadAsyncResponse(new BlobDownloadAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), null, r.getDeserializedHeaders()))); } static FileReadAsyncResponse toFileReadAsyncResponse(BlobDownloadAsyncResponse r) { if (r == null) { return null; } return new FileReadAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), r.getValue(), 
Transforms.toPathReadHeaders(r.getDeserializedHeaders(), getEncryptionContext(r))); } private static FileReadHeaders toPathReadHeaders(BlobDownloadHeaders h, String encryptionContext) { if (h == null) { return null; } return new FileReadHeaders() .setLastModified(h.getLastModified()) .setMetadata(h.getMetadata()) .setContentLength(h.getContentLength()) .setContentType(h.getContentType()) .setContentRange(h.getContentRange()) .setETag(h.getETag()) .setContentMd5(h.getContentMd5()) .setContentEncoding(h.getContentEncoding()) .setCacheControl(h.getCacheControl()) .setContentDisposition(h.getContentDisposition()) .setContentLanguage(h.getContentLanguage()) .setCopyCompletionTime(h.getCopyCompletionTime()) .setCopyStatusDescription(h.getCopyStatusDescription()) .setCopyId(h.getCopyId()) .setCopyProgress(h.getCopyProgress()) .setCopySource(h.getCopySource()) .setCopyStatus(Transforms.toDataLakeCopyStatusType(h.getCopyStatus())) .setLeaseDuration(Transforms.toDataLakeLeaseDurationType(h.getLeaseDuration())) .setLeaseState(Transforms.toDataLakeLeaseStateType(h.getLeaseState())) .setLeaseStatus(Transforms.toDataLakeLeaseStatusType(h.getLeaseStatus())) .setClientRequestId(h.getClientRequestId()) .setRequestId(h.getRequestId()) .setVersion(h.getVersion()) .setAcceptRanges(h.getAcceptRanges()) .setDateProperty(h.getDateProperty()) .setServerEncrypted(h.isServerEncrypted()) .setEncryptionKeySha256(h.getEncryptionKeySha256()) .setFileContentMd5(h.getBlobContentMD5()) .setContentCrc64(h.getContentCrc64()) .setErrorCode(h.getErrorCode()) .setEncryptionContext(encryptionContext); } static List<BlobSignedIdentifier> toBlobIdentifierList(List<DataLakeSignedIdentifier> identifiers) { if (identifiers == null) { return null; } List<BlobSignedIdentifier> blobIdentifiers = new ArrayList<>(); for (DataLakeSignedIdentifier identifier : identifiers) { blobIdentifiers.add(Transforms.toBlobIdentifier(identifier)); } return blobIdentifiers; } private static BlobSignedIdentifier 
toBlobIdentifier(DataLakeSignedIdentifier identifier) { if (identifier == null) { return null; } return new BlobSignedIdentifier() .setId(identifier.getId()) .setAccessPolicy(Transforms.toBlobAccessPolicy(identifier.getAccessPolicy())); } private static BlobAccessPolicy toBlobAccessPolicy(DataLakeAccessPolicy accessPolicy) { if (accessPolicy == null) { return null; } return new BlobAccessPolicy() .setExpiresOn(accessPolicy.getExpiresOn()) .setStartsOn(accessPolicy.getStartsOn()) .setPermissions(accessPolicy.getPermissions()); } static FileSystemAccessPolicies toFileSystemAccessPolicies(BlobContainerAccessPolicies accessPolicies) { if (accessPolicies == null) { return null; } return new FileSystemAccessPolicies(Transforms.toDataLakePublicAccessType(accessPolicies.getBlobAccessType()), Transforms.toDataLakeIdentifierList(accessPolicies.getIdentifiers())); } static List<DataLakeSignedIdentifier> toDataLakeIdentifierList(List<BlobSignedIdentifier> identifiers) { if (identifiers == null) { return null; } List<DataLakeSignedIdentifier> dataLakeIdentifiers = new ArrayList<>(); for (BlobSignedIdentifier identifier : identifiers) { dataLakeIdentifiers.add(Transforms.toDataLakeIdentifier(identifier)); } return dataLakeIdentifiers; } private static DataLakeSignedIdentifier toDataLakeIdentifier(BlobSignedIdentifier identifier) { if (identifier == null) { return null; } return new DataLakeSignedIdentifier() .setId(identifier.getId()) .setAccessPolicy(Transforms.toDataLakeAccessPolicy(identifier.getAccessPolicy())); } private static DataLakeAccessPolicy toDataLakeAccessPolicy(BlobAccessPolicy accessPolicy) { if (accessPolicy == null) { return null; } return new DataLakeAccessPolicy() .setExpiresOn(accessPolicy.getExpiresOn()) .setStartsOn(accessPolicy.getStartsOn()) .setPermissions(accessPolicy.getPermissions()); } static BlobQuerySerialization toBlobQuerySerialization(FileQuerySerialization ser) { if (ser == null) { return null; } if (ser instanceof FileQueryJsonSerialization) 
{ FileQueryJsonSerialization jsonSer = (FileQueryJsonSerialization) ser; return new BlobQueryJsonSerialization().setRecordSeparator(jsonSer.getRecordSeparator()); } else if (ser instanceof FileQueryDelimitedSerialization) { FileQueryDelimitedSerialization delSer = (FileQueryDelimitedSerialization) ser; return new BlobQueryDelimitedSerialization() .setColumnSeparator(delSer.getColumnSeparator()) .setEscapeChar(delSer.getEscapeChar()) .setFieldQuote(delSer.getFieldQuote()) .setHeadersPresent(delSer.isHeadersPresent()) .setRecordSeparator(delSer.getRecordSeparator()); } else if (ser instanceof FileQueryArrowSerialization) { FileQueryArrowSerialization arrSer = (FileQueryArrowSerialization) ser; return new BlobQueryArrowSerialization().setSchema(toBlobQueryArrowSchema(arrSer.getSchema())); } else if (ser instanceof FileQueryParquetSerialization) { return new BlobQueryParquetSerialization(); } else { throw new IllegalArgumentException(SERIALIZATION_MESSAGE); } } private static List<BlobQueryArrowField> toBlobQueryArrowSchema(List<FileQueryArrowField> schema) { if (schema == null) { return null; } List<BlobQueryArrowField> blobSchema = new ArrayList<>(schema.size()); for (FileQueryArrowField field : schema) { blobSchema.add(toBlobQueryArrowField(field)); } return blobSchema; } private static BlobQueryArrowField toBlobQueryArrowField(FileQueryArrowField field) { if (field == null) { return null; } return new BlobQueryArrowField(BlobQueryArrowFieldType.fromString(field.getType().toString())) .setName(field.getName()) .setPrecision(field.getPrecision()) .setScale(field.getScale()); } static Consumer<BlobQueryError> toBlobQueryErrorConsumer(Consumer<FileQueryError> er) { if (er == null) { return null; } return error -> er.accept(toFileQueryError(error)); } static Consumer<BlobQueryProgress> toBlobQueryProgressConsumer(Consumer<FileQueryProgress> pr) { if (pr == null) { return null; } return progress -> pr.accept(toFileQueryProgress(progress)); } private static FileQueryError 
toFileQueryError(BlobQueryError error) { if (error == null) { return null; } return new FileQueryError(error.isFatal(), error.getName(), error.getDescription(), error.getPosition()); } private static FileQueryProgress toFileQueryProgress(BlobQueryProgress progress) { if (progress == null) { return null; } return new FileQueryProgress(progress.getBytesScanned(), progress.getTotalBytes()); } static FileQueryResponse toFileQueryResponse(BlobQueryResponse r) { if (r == null) { return null; } return new FileQueryResponse(Transforms.toFileQueryAsyncResponse(new BlobQueryAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), null, r.getDeserializedHeaders()))); } static FileQueryAsyncResponse toFileQueryAsyncResponse(BlobQueryAsyncResponse r) { if (r == null) { return null; } return new FileQueryAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), r.getValue(), Transforms.toFileQueryHeaders(r.getDeserializedHeaders())); } private static FileQueryHeaders toFileQueryHeaders(BlobQueryHeaders h) { if (h == null) { return null; } return new FileQueryHeaders() .setLastModified(h.getLastModified()) .setMetadata(h.getMetadata()) .setContentLength(h.getContentLength()) .setContentType(h.getContentType()) .setContentRange(h.getContentRange()) .setETag(h.getETag()) .setContentMd5(h.getContentMd5()) .setContentEncoding(h.getContentEncoding()) .setCacheControl(h.getCacheControl()) .setContentDisposition(h.getContentDisposition()) .setContentLanguage(h.getContentLanguage()) .setCopyCompletionTime(h.getCopyCompletionTime()) .setCopyStatusDescription(h.getCopyStatusDescription()) .setCopyId(h.getCopyId()) .setCopyProgress(h.getCopyProgress()) .setCopySource(h.getCopySource()) .setCopyStatus(Transforms.toDataLakeCopyStatusType(h.getCopyStatus())) .setLeaseDuration(Transforms.toDataLakeLeaseDurationType(h.getLeaseDuration())) .setLeaseState(Transforms.toDataLakeLeaseStateType(h.getLeaseState())) .setLeaseStatus(Transforms.toDataLakeLeaseStatusType(h.getLeaseStatus())) 
.setClientRequestId(h.getClientRequestId()) .setRequestId(h.getRequestId()) .setVersion(h.getVersion()) .setAcceptRanges(h.getAcceptRanges()) .setDateProperty(h.getDateProperty()) .setServerEncrypted(h.isServerEncrypted()) .setEncryptionKeySha256(h.getEncryptionKeySha256()) .setFileContentMd5(h.getContentMd5()) .setContentCrc64(h.getContentCrc64()) .setErrorCode(h.getErrorCode()); } static BlobQueryOptions toBlobQueryOptions(FileQueryOptions options) { if (options == null) { return null; } if (options.getOutputStream() == null) { return new BlobQueryOptions(options.getExpression()) .setInputSerialization(Transforms.toBlobQuerySerialization(options.getInputSerialization())) .setOutputSerialization(Transforms.toBlobQuerySerialization(options.getOutputSerialization())) .setRequestConditions(Transforms.toBlobRequestConditions(options.getRequestConditions())) .setErrorConsumer(Transforms.toBlobQueryErrorConsumer(options.getErrorConsumer())) .setProgressConsumer(Transforms.toBlobQueryProgressConsumer(options.getProgressConsumer())); } else { return new BlobQueryOptions(options.getExpression(), options.getOutputStream()) .setInputSerialization(Transforms.toBlobQuerySerialization(options.getInputSerialization())) .setOutputSerialization(Transforms.toBlobQuerySerialization(options.getOutputSerialization())) .setRequestConditions(Transforms.toBlobRequestConditions(options.getRequestConditions())) .setErrorConsumer(Transforms.toBlobQueryErrorConsumer(options.getErrorConsumer())) .setProgressConsumer(Transforms.toBlobQueryProgressConsumer(options.getProgressConsumer())); } } static UndeleteBlobContainerOptions toBlobContainerUndeleteOptions(FileSystemUndeleteOptions options) { if (options == null) { return null; } return new UndeleteBlobContainerOptions(options.getDeletedFileSystemName(), options.getDeletedFileSystemVersion()).setDestinationContainerName(options.getDestinationFileSystemName()); } static DataLakeServiceProperties 
toDataLakeServiceProperties(BlobServiceProperties blobProps) { if (blobProps == null) { return null; } return new DataLakeServiceProperties() .setDefaultServiceVersion(blobProps.getDefaultServiceVersion()) .setCors(blobProps.getCors().stream().map(Transforms::toDataLakeCorsRule).collect(Collectors.toList())) .setDeleteRetentionPolicy(toDataLakeRetentionPolicy(blobProps.getDeleteRetentionPolicy())) .setHourMetrics(toDataLakeMetrics(blobProps.getHourMetrics())) .setMinuteMetrics(toDataLakeMetrics(blobProps.getMinuteMetrics())) .setLogging(toDataLakeAnalyticsLogging(blobProps.getLogging())) .setStaticWebsite(toDataLakeStaticWebsite(blobProps.getStaticWebsite())); } static DataLakeStaticWebsite toDataLakeStaticWebsite(StaticWebsite staticWebsite) { if (staticWebsite == null) { return null; } return new DataLakeStaticWebsite() .setDefaultIndexDocumentPath(staticWebsite.getDefaultIndexDocumentPath()) .setEnabled(staticWebsite.isEnabled()) .setErrorDocument404Path(staticWebsite.getErrorDocument404Path()) .setIndexDocument(staticWebsite.getIndexDocument()); } static DataLakeAnalyticsLogging toDataLakeAnalyticsLogging(BlobAnalyticsLogging blobLogging) { if (blobLogging == null) { return null; } return new DataLakeAnalyticsLogging() .setDelete(blobLogging.isDelete()) .setRead(blobLogging.isRead()) .setWrite(blobLogging.isWrite()) .setRetentionPolicy(toDataLakeRetentionPolicy(blobLogging.getRetentionPolicy())) .setVersion(blobLogging.getVersion()); } static DataLakeCorsRule toDataLakeCorsRule(BlobCorsRule blobRule) { if (blobRule == null) { return null; } return new DataLakeCorsRule() .setAllowedHeaders(blobRule.getAllowedHeaders()) .setAllowedMethods(blobRule.getAllowedMethods()) .setAllowedOrigins(blobRule.getAllowedOrigins()) .setExposedHeaders(blobRule.getExposedHeaders()) .setMaxAgeInSeconds(blobRule.getMaxAgeInSeconds()); } static DataLakeMetrics toDataLakeMetrics(BlobMetrics blobMetrics) { if (blobMetrics == null) { return null; } return new DataLakeMetrics() 
.setEnabled(blobMetrics.isEnabled()) .setIncludeApis(blobMetrics.isIncludeApis()) .setVersion(blobMetrics.getVersion()) .setRetentionPolicy(toDataLakeRetentionPolicy(blobMetrics.getRetentionPolicy())); } static DataLakeRetentionPolicy toDataLakeRetentionPolicy(BlobRetentionPolicy blobPolicy) { if (blobPolicy == null) { return null; } return new DataLakeRetentionPolicy() .setDays(blobPolicy.getDays()) .setEnabled(blobPolicy.isEnabled()); } static BlobServiceProperties toBlobServiceProperties(DataLakeServiceProperties datalakeProperties) { if (datalakeProperties == null) { return null; } return new BlobServiceProperties() .setDefaultServiceVersion(datalakeProperties.getDefaultServiceVersion()) .setCors(datalakeProperties.getCors().stream().map(Transforms::toBlobCorsRule).collect(Collectors.toList())) .setDeleteRetentionPolicy(toBlobRetentionPolicy(datalakeProperties.getDeleteRetentionPolicy())) .setHourMetrics(toBlobMetrics(datalakeProperties.getHourMetrics())) .setMinuteMetrics(toBlobMetrics(datalakeProperties.getMinuteMetrics())) .setLogging(toBlobAnalyticsLogging(datalakeProperties.getLogging())) .setStaticWebsite(toBlobStaticWebsite(datalakeProperties.getStaticWebsite())); } static StaticWebsite toBlobStaticWebsite(DataLakeStaticWebsite staticWebsite) { if (staticWebsite == null) { return null; } return new StaticWebsite() .setDefaultIndexDocumentPath(staticWebsite.getDefaultIndexDocumentPath()) .setEnabled(staticWebsite.isEnabled()) .setErrorDocument404Path(staticWebsite.getErrorDocument404Path()) .setIndexDocument(staticWebsite.getIndexDocument()); } static BlobAnalyticsLogging toBlobAnalyticsLogging(DataLakeAnalyticsLogging datalakeLogging) { if (datalakeLogging == null) { return null; } return new BlobAnalyticsLogging() .setDelete(datalakeLogging.isDelete()) .setRead(datalakeLogging.isRead()) .setWrite(datalakeLogging.isWrite()) .setRetentionPolicy(toBlobRetentionPolicy(datalakeLogging.getRetentionPolicy())) .setVersion(datalakeLogging.getVersion()); } static 
BlobCorsRule toBlobCorsRule(DataLakeCorsRule datalakeRule) { if (datalakeRule == null) { return null; } return new BlobCorsRule() .setAllowedHeaders(datalakeRule.getAllowedHeaders()) .setAllowedMethods(datalakeRule.getAllowedMethods()) .setAllowedOrigins(datalakeRule.getAllowedOrigins()) .setExposedHeaders(datalakeRule.getExposedHeaders()) .setMaxAgeInSeconds(datalakeRule.getMaxAgeInSeconds()); } static BlobMetrics toBlobMetrics(DataLakeMetrics datalakeMetrics) { if (datalakeMetrics == null) { return null; } return new BlobMetrics() .setEnabled(datalakeMetrics.isEnabled()) .setIncludeApis(datalakeMetrics.isIncludeApis()) .setVersion(datalakeMetrics.getVersion()) .setRetentionPolicy(toBlobRetentionPolicy(datalakeMetrics.getRetentionPolicy())); } static BlobRetentionPolicy toBlobRetentionPolicy(DataLakeRetentionPolicy datalakePolicy) { if (datalakePolicy == null) { return null; } return new BlobRetentionPolicy() .setDays(datalakePolicy.getDays()) .setEnabled(datalakePolicy.isEnabled()); } static PathDeletedItem toPathDeletedItem(BlobItemInternal blobItem) { if (blobItem == null) { return null; } return new PathDeletedItem(blobItem.getName(), false, blobItem.getDeletionId(), blobItem.getProperties().getDeletedTime(), blobItem.getProperties().getRemainingRetentionDays()); } static PathDeletedItem toPathDeletedItem(BlobPrefix blobPrefix) { return new PathDeletedItem(blobPrefix.getName(), true, null, null, null); } static CustomerProvidedKey toBlobCustomerProvidedKey( com.azure.storage.file.datalake.models.CustomerProvidedKey key) { if (key == null) { return null; } return new CustomerProvidedKey(key.getKey()); } static CpkInfo fromBlobCpkInfo(com.azure.storage.blob.models.CpkInfo info) { if (info == null) { return null; } return new CpkInfo() .setEncryptionKey(info.getEncryptionKey()) .setEncryptionAlgorithm(com.azure.storage.file.datalake.models.EncryptionAlgorithmType.fromString( info.getEncryptionAlgorithm().toString())) 
.setEncryptionKeySha256(info.getEncryptionKeySha256()); } static BlobContainerEncryptionScope toBlobContainerEncryptionScope(FileSystemEncryptionScopeOptions fileSystemEncryptionScope) { if (fileSystemEncryptionScope == null) { return null; } return new BlobContainerEncryptionScope() .setDefaultEncryptionScope(fileSystemEncryptionScope.getDefaultEncryptionScope()) .setEncryptionScopeOverridePrevented(fileSystemEncryptionScope.isEncryptionScopeOverridePrevented()); } }
class Transforms { private static final String SERIALIZATION_MESSAGE = String.format("'serialization' must be one of %s, %s, %s or " + "%s.", FileQueryJsonSerialization.class.getSimpleName(), FileQueryDelimitedSerialization.class.getSimpleName(), FileQueryArrowSerialization.class.getSimpleName(), FileQueryParquetSerialization.class.getSimpleName()); private static final long EPOCH_CONVERSION; public static final HttpHeaderName X_MS_ENCRYPTION_CONTEXT = HttpHeaderName.fromString("x-ms-encryption-context"); static { GregorianCalendar unixEpoch = new GregorianCalendar(); unixEpoch.clear(); unixEpoch.set(1970, Calendar.JANUARY, 1, 0, 0, 0); GregorianCalendar windowsEpoch = new GregorianCalendar(); windowsEpoch.clear(); windowsEpoch.set(1601, Calendar.JANUARY, 1, 0, 0, 0); EPOCH_CONVERSION = unixEpoch.getTimeInMillis() - windowsEpoch.getTimeInMillis(); } static com.azure.storage.blob.models.PublicAccessType toBlobPublicAccessType(PublicAccessType fileSystemPublicAccessType) { if (fileSystemPublicAccessType == null) { return null; } return com.azure.storage.blob.models.PublicAccessType.fromString(fileSystemPublicAccessType.toString()); } private static LeaseDurationType toDataLakeLeaseDurationType(com.azure.storage.blob.models.LeaseDurationType blobLeaseDurationType) { if (blobLeaseDurationType == null) { return null; } return LeaseDurationType.fromString(blobLeaseDurationType.toString()); } private static LeaseStateType toDataLakeLeaseStateType(com.azure.storage.blob.models.LeaseStateType blobLeaseStateType) { if (blobLeaseStateType == null) { return null; } return LeaseStateType.fromString(blobLeaseStateType.toString()); } private static LeaseStatusType toDataLakeLeaseStatusType(com.azure.storage.blob.models.LeaseStatusType blobLeaseStatusType) { if (blobLeaseStatusType == null) { return null; } return LeaseStatusType.fromString(blobLeaseStatusType.toString()); } private static PublicAccessType toDataLakePublicAccessType(com.azure.storage.blob.models.PublicAccessType 
blobPublicAccessType) { if (blobPublicAccessType == null) { return null; } return PublicAccessType.fromString(blobPublicAccessType.toString()); } private static CopyStatusType toDataLakeCopyStatusType( com.azure.storage.blob.models.CopyStatusType blobCopyStatus) { if (blobCopyStatus == null) { return null; } return CopyStatusType.fromString(blobCopyStatus.toString()); } private static ArchiveStatus toDataLakeArchiveStatus( com.azure.storage.blob.models.ArchiveStatus blobArchiveStatus) { if (blobArchiveStatus == null) { return null; } return ArchiveStatus.fromString(blobArchiveStatus.toString()); } private static AccessTier toDataLakeAccessTier(com.azure.storage.blob.models.AccessTier blobAccessTier) { if (blobAccessTier == null) { return null; } return AccessTier.fromString(blobAccessTier.toString()); } static FileSystemProperties toFileSystemProperties(BlobContainerProperties blobContainerProperties) { if (blobContainerProperties == null) { return null; } FileSystemProperties fileSystemProperties = new FileSystemProperties(blobContainerProperties.getMetadata(), blobContainerProperties.getETag(), blobContainerProperties.getLastModified(), Transforms.toDataLakeLeaseDurationType(blobContainerProperties.getLeaseDuration()), Transforms.toDataLakeLeaseStateType(blobContainerProperties.getLeaseState()), Transforms.toDataLakeLeaseStatusType(blobContainerProperties.getLeaseStatus()), Transforms.toDataLakePublicAccessType(blobContainerProperties.getBlobPublicAccess()), blobContainerProperties.hasImmutabilityPolicy(), blobContainerProperties.hasLegalHold()); return AccessorUtility.getFileSystemPropertiesAccessor() .setFileSystemProperties(fileSystemProperties, blobContainerProperties.getDefaultEncryptionScope(), blobContainerProperties.isEncryptionScopeOverridePrevented()); } private static BlobContainerListDetails toBlobContainerListDetails(FileSystemListDetails fileSystemListDetails) { if (fileSystemListDetails == null) { return null; } return new 
BlobContainerListDetails() .setRetrieveMetadata(fileSystemListDetails.getRetrieveMetadata()) .setRetrieveDeleted(fileSystemListDetails.getRetrieveDeleted()) .setRetrieveSystemContainers(fileSystemListDetails.getRetrieveSystemFileSystems()); } static ListBlobContainersOptions toListBlobContainersOptions(ListFileSystemsOptions listFileSystemsOptions) { if (listFileSystemsOptions == null) { return null; } return new ListBlobContainersOptions() .setDetails(toBlobContainerListDetails(listFileSystemsOptions.getDetails())) .setMaxResultsPerPage(listFileSystemsOptions.getMaxResultsPerPage()) .setPrefix(listFileSystemsOptions.getPrefix()); } static UserDelegationKey toDataLakeUserDelegationKey(com.azure.storage.blob.models.UserDelegationKey blobUserDelegationKey) { if (blobUserDelegationKey == null) { return null; } return new UserDelegationKey() .setSignedExpiry(blobUserDelegationKey.getSignedExpiry()) .setSignedObjectId(blobUserDelegationKey.getSignedObjectId()) .setSignedTenantId(blobUserDelegationKey.getSignedTenantId()) .setSignedService(blobUserDelegationKey.getSignedService()) .setSignedStart(blobUserDelegationKey.getSignedStart()) .setSignedVersion(blobUserDelegationKey.getSignedVersion()) .setValue(blobUserDelegationKey.getValue()); } static BlobHttpHeaders toBlobHttpHeaders(PathHttpHeaders pathHTTPHeaders) { if (pathHTTPHeaders == null) { return null; } return new BlobHttpHeaders() .setCacheControl(pathHTTPHeaders.getCacheControl()) .setContentDisposition(pathHTTPHeaders.getContentDisposition()) .setContentEncoding(pathHTTPHeaders.getContentEncoding()) .setContentLanguage(pathHTTPHeaders.getContentLanguage()) .setContentType(pathHTTPHeaders.getContentType()) .setContentMd5(pathHTTPHeaders.getContentMd5()); } static BlobInputStreamOptions toBlobInputStreamOptions(DataLakeFileInputStreamOptions options) { if (options == null) { return null; } return new BlobInputStreamOptions() .setBlockSize(options.getBlockSize()) .setRange(toBlobRange(options.getRange())) 
.setRequestConditions(toBlobRequestConditions(options.getRequestConditions())) .setConsistentReadControl(toBlobConsistentReadControl(options.getConsistentReadControl())); } static com.azure.storage.blob.models.ConsistentReadControl toBlobConsistentReadControl( com.azure.storage.file.datalake.models.ConsistentReadControl datalakeConsistentReadControl) { if (datalakeConsistentReadControl == null) { return null; } switch (datalakeConsistentReadControl) { case NONE: return ConsistentReadControl.NONE; case ETAG: return ConsistentReadControl.ETAG; default: throw new IllegalArgumentException("Could not convert ConsistentReadControl"); } } static BlobRange toBlobRange(FileRange fileRange) { if (fileRange == null) { return null; } return new BlobRange(fileRange.getOffset(), fileRange.getCount()); } static com.azure.storage.blob.models.DownloadRetryOptions toBlobDownloadRetryOptions( DownloadRetryOptions dataLakeOptions) { if (dataLakeOptions == null) { return null; } return new com.azure.storage.blob.models.DownloadRetryOptions() .setMaxRetryRequests(dataLakeOptions.getMaxRetryRequests()); } static PathProperties toPathProperties(BlobProperties properties) { return toPathProperties(properties, null); } static PathProperties toPathProperties(BlobProperties properties, String encryptionContext) { if (properties == null) { return null; } else { PathProperties pathProperties = new PathProperties(properties.getCreationTime(), properties.getLastModified(), properties.getETag(), properties.getBlobSize(), properties.getContentType(), properties.getContentMd5(), properties.getContentEncoding(), properties.getContentDisposition(), properties.getContentLanguage(), properties.getCacheControl(), Transforms.toDataLakeLeaseStatusType(properties.getLeaseStatus()), Transforms.toDataLakeLeaseStateType(properties.getLeaseState()), Transforms.toDataLakeLeaseDurationType(properties.getLeaseDuration()), properties.getCopyId(), Transforms.toDataLakeCopyStatusType(properties.getCopyStatus()), 
properties.getCopySource(), properties.getCopyProgress(), properties.getCopyCompletionTime(), properties.getCopyStatusDescription(), properties.isServerEncrypted(), properties.isIncrementalCopy(), Transforms.toDataLakeAccessTier(properties.getAccessTier()), Transforms.toDataLakeArchiveStatus(properties.getArchiveStatus()), properties.getEncryptionKeySha256(), properties.getAccessTierChangeTime(), properties.getMetadata(), properties.getExpiresOn()); return AccessorUtility.getPathPropertiesAccessor().setPathProperties(pathProperties, properties.getEncryptionScope(), encryptionContext); } } static FileSystemItem toFileSystemItem(BlobContainerItem blobContainerItem) { if (blobContainerItem == null) { return null; } return new FileSystemItem() .setName(blobContainerItem.getName()) .setDeleted(blobContainerItem.isDeleted()) .setVersion(blobContainerItem.getVersion()) .setMetadata(blobContainerItem.getMetadata()) .setProperties(Transforms.toFileSystemItemProperties(blobContainerItem.getProperties())); } private static FileSystemItemProperties toFileSystemItemProperties( BlobContainerItemProperties blobContainerItemProperties) { if (blobContainerItemProperties == null) { return null; } return new FileSystemItemProperties() .setETag(blobContainerItemProperties.getETag()) .setLastModified(blobContainerItemProperties.getLastModified()) .setLeaseStatus(toDataLakeLeaseStatusType(blobContainerItemProperties.getLeaseStatus())) .setLeaseState(toDataLakeLeaseStateType(blobContainerItemProperties.getLeaseState())) .setLeaseDuration(toDataLakeLeaseDurationType(blobContainerItemProperties.getLeaseDuration())) .setPublicAccess(toDataLakePublicAccessType(blobContainerItemProperties.getPublicAccess())) .setHasLegalHold(blobContainerItemProperties.isHasLegalHold()) .setHasImmutabilityPolicy(blobContainerItemProperties.isHasImmutabilityPolicy()) .setEncryptionScope(blobContainerItemProperties.getDefaultEncryptionScope()) 
.setEncryptionScopeOverridePrevented(blobContainerItemProperties.isEncryptionScopeOverridePrevented()); } static PathItem toPathItem(Path path) { if (path == null) { return null; } PathItem pathItem = new PathItem(path.getETag(), parseDateOrNull(path.getLastModified()), path.getContentLength() == null ? 0 : path.getContentLength(), path.getGroup(), path.isDirectory() != null && path.isDirectory(), path.getName(), path.getOwner(), path.getPermissions(), path.getCreationTime() == null ? null : fromWindowsFileTimeOrNull(Long.parseLong(path.getCreationTime())), path.getExpiryTime() == null ? null : fromWindowsFileTimeOrNull(Long.parseLong(path.getExpiryTime()))); return AccessorUtility.getPathItemAccessor().setPathItemProperties(pathItem, path.getEncryptionScope(), path.getEncryptionContext()); } private static OffsetDateTime parseDateOrNull(String date) { return date == null ? null : OffsetDateTime.parse(date, DateTimeFormatter.RFC_1123_DATE_TIME); } private static OffsetDateTime fromWindowsFileTimeOrNull(long fileTime) { if (fileTime == 0) { return null; } long fileTimeMs = fileTime / 10000; long fileTimeUnixEpoch = fileTimeMs - EPOCH_CONVERSION; return Instant.ofEpochMilli(fileTimeUnixEpoch).atOffset(ZoneOffset.UTC); } static BlobRequestConditions toBlobRequestConditions(DataLakeRequestConditions requestConditions) { if (requestConditions == null) { return null; } return new BlobRequestConditions() .setLeaseId(requestConditions.getLeaseId()) .setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince()) .setIfNoneMatch(requestConditions.getIfNoneMatch()) .setIfMatch(requestConditions.getIfMatch()) .setIfModifiedSince(requestConditions.getIfModifiedSince()); } static FileReadResponse toFileReadResponse(BlobDownloadResponse r) { if (r == null) { return null; } return new FileReadResponse(Transforms.toFileReadAsyncResponse(new BlobDownloadAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), null, r.getDeserializedHeaders()))); } static 
FileReadAsyncResponse toFileReadAsyncResponse(BlobDownloadAsyncResponse r) { if (r == null) { return null; } return new FileReadAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), r.getValue(), Transforms.toPathReadHeaders(r.getDeserializedHeaders(), getEncryptionContext(r))); } private static FileReadHeaders toPathReadHeaders(BlobDownloadHeaders h, String encryptionContext) { if (h == null) { return null; } return new FileReadHeaders() .setLastModified(h.getLastModified()) .setMetadata(h.getMetadata()) .setContentLength(h.getContentLength()) .setContentType(h.getContentType()) .setContentRange(h.getContentRange()) .setETag(h.getETag()) .setContentMd5(h.getContentMd5()) .setContentEncoding(h.getContentEncoding()) .setCacheControl(h.getCacheControl()) .setContentDisposition(h.getContentDisposition()) .setContentLanguage(h.getContentLanguage()) .setCopyCompletionTime(h.getCopyCompletionTime()) .setCopyStatusDescription(h.getCopyStatusDescription()) .setCopyId(h.getCopyId()) .setCopyProgress(h.getCopyProgress()) .setCopySource(h.getCopySource()) .setCopyStatus(Transforms.toDataLakeCopyStatusType(h.getCopyStatus())) .setLeaseDuration(Transforms.toDataLakeLeaseDurationType(h.getLeaseDuration())) .setLeaseState(Transforms.toDataLakeLeaseStateType(h.getLeaseState())) .setLeaseStatus(Transforms.toDataLakeLeaseStatusType(h.getLeaseStatus())) .setClientRequestId(h.getClientRequestId()) .setRequestId(h.getRequestId()) .setVersion(h.getVersion()) .setAcceptRanges(h.getAcceptRanges()) .setDateProperty(h.getDateProperty()) .setServerEncrypted(h.isServerEncrypted()) .setEncryptionKeySha256(h.getEncryptionKeySha256()) .setFileContentMd5(h.getBlobContentMD5()) .setContentCrc64(h.getContentCrc64()) .setErrorCode(h.getErrorCode()) .setCreationTime(h.getCreationTime()) .setEncryptionContext(encryptionContext); } static List<BlobSignedIdentifier> toBlobIdentifierList(List<DataLakeSignedIdentifier> identifiers) { if (identifiers == null) { return null; } 
List<BlobSignedIdentifier> blobIdentifiers = new ArrayList<>(); for (DataLakeSignedIdentifier identifier : identifiers) { blobIdentifiers.add(Transforms.toBlobIdentifier(identifier)); } return blobIdentifiers; } private static BlobSignedIdentifier toBlobIdentifier(DataLakeSignedIdentifier identifier) { if (identifier == null) { return null; } return new BlobSignedIdentifier() .setId(identifier.getId()) .setAccessPolicy(Transforms.toBlobAccessPolicy(identifier.getAccessPolicy())); } private static BlobAccessPolicy toBlobAccessPolicy(DataLakeAccessPolicy accessPolicy) { if (accessPolicy == null) { return null; } return new BlobAccessPolicy() .setExpiresOn(accessPolicy.getExpiresOn()) .setStartsOn(accessPolicy.getStartsOn()) .setPermissions(accessPolicy.getPermissions()); } static FileSystemAccessPolicies toFileSystemAccessPolicies(BlobContainerAccessPolicies accessPolicies) { if (accessPolicies == null) { return null; } return new FileSystemAccessPolicies(Transforms.toDataLakePublicAccessType(accessPolicies.getBlobAccessType()), Transforms.toDataLakeIdentifierList(accessPolicies.getIdentifiers())); } static List<DataLakeSignedIdentifier> toDataLakeIdentifierList(List<BlobSignedIdentifier> identifiers) { if (identifiers == null) { return null; } List<DataLakeSignedIdentifier> dataLakeIdentifiers = new ArrayList<>(); for (BlobSignedIdentifier identifier : identifiers) { dataLakeIdentifiers.add(Transforms.toDataLakeIdentifier(identifier)); } return dataLakeIdentifiers; } private static DataLakeSignedIdentifier toDataLakeIdentifier(BlobSignedIdentifier identifier) { if (identifier == null) { return null; } return new DataLakeSignedIdentifier() .setId(identifier.getId()) .setAccessPolicy(Transforms.toDataLakeAccessPolicy(identifier.getAccessPolicy())); } private static DataLakeAccessPolicy toDataLakeAccessPolicy(BlobAccessPolicy accessPolicy) { if (accessPolicy == null) { return null; } return new DataLakeAccessPolicy() .setExpiresOn(accessPolicy.getExpiresOn()) 
.setStartsOn(accessPolicy.getStartsOn()) .setPermissions(accessPolicy.getPermissions()); } static BlobQuerySerialization toBlobQuerySerialization(FileQuerySerialization ser) { if (ser == null) { return null; } if (ser instanceof FileQueryJsonSerialization) { FileQueryJsonSerialization jsonSer = (FileQueryJsonSerialization) ser; return new BlobQueryJsonSerialization().setRecordSeparator(jsonSer.getRecordSeparator()); } else if (ser instanceof FileQueryDelimitedSerialization) { FileQueryDelimitedSerialization delSer = (FileQueryDelimitedSerialization) ser; return new BlobQueryDelimitedSerialization() .setColumnSeparator(delSer.getColumnSeparator()) .setEscapeChar(delSer.getEscapeChar()) .setFieldQuote(delSer.getFieldQuote()) .setHeadersPresent(delSer.isHeadersPresent()) .setRecordSeparator(delSer.getRecordSeparator()); } else if (ser instanceof FileQueryArrowSerialization) { FileQueryArrowSerialization arrSer = (FileQueryArrowSerialization) ser; return new BlobQueryArrowSerialization().setSchema(toBlobQueryArrowSchema(arrSer.getSchema())); } else if (ser instanceof FileQueryParquetSerialization) { return new BlobQueryParquetSerialization(); } else { throw new IllegalArgumentException(SERIALIZATION_MESSAGE); } } private static List<BlobQueryArrowField> toBlobQueryArrowSchema(List<FileQueryArrowField> schema) { if (schema == null) { return null; } List<BlobQueryArrowField> blobSchema = new ArrayList<>(schema.size()); for (FileQueryArrowField field : schema) { blobSchema.add(toBlobQueryArrowField(field)); } return blobSchema; } private static BlobQueryArrowField toBlobQueryArrowField(FileQueryArrowField field) { if (field == null) { return null; } return new BlobQueryArrowField(BlobQueryArrowFieldType.fromString(field.getType().toString())) .setName(field.getName()) .setPrecision(field.getPrecision()) .setScale(field.getScale()); } static Consumer<BlobQueryError> toBlobQueryErrorConsumer(Consumer<FileQueryError> er) { if (er == null) { return null; } return error -> 
er.accept(toFileQueryError(error)); } static Consumer<BlobQueryProgress> toBlobQueryProgressConsumer(Consumer<FileQueryProgress> pr) { if (pr == null) { return null; } return progress -> pr.accept(toFileQueryProgress(progress)); } private static FileQueryError toFileQueryError(BlobQueryError error) { if (error == null) { return null; } return new FileQueryError(error.isFatal(), error.getName(), error.getDescription(), error.getPosition()); } private static FileQueryProgress toFileQueryProgress(BlobQueryProgress progress) { if (progress == null) { return null; } return new FileQueryProgress(progress.getBytesScanned(), progress.getTotalBytes()); } static FileQueryResponse toFileQueryResponse(BlobQueryResponse r) { if (r == null) { return null; } return new FileQueryResponse(Transforms.toFileQueryAsyncResponse(new BlobQueryAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), null, r.getDeserializedHeaders()))); } static FileQueryAsyncResponse toFileQueryAsyncResponse(BlobQueryAsyncResponse r) { if (r == null) { return null; } return new FileQueryAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), r.getValue(), Transforms.toFileQueryHeaders(r.getDeserializedHeaders())); } private static FileQueryHeaders toFileQueryHeaders(BlobQueryHeaders h) { if (h == null) { return null; } return new FileQueryHeaders() .setLastModified(h.getLastModified()) .setMetadata(h.getMetadata()) .setContentLength(h.getContentLength()) .setContentType(h.getContentType()) .setContentRange(h.getContentRange()) .setETag(h.getETag()) .setContentMd5(h.getContentMd5()) .setContentEncoding(h.getContentEncoding()) .setCacheControl(h.getCacheControl()) .setContentDisposition(h.getContentDisposition()) .setContentLanguage(h.getContentLanguage()) .setCopyCompletionTime(h.getCopyCompletionTime()) .setCopyStatusDescription(h.getCopyStatusDescription()) .setCopyId(h.getCopyId()) .setCopyProgress(h.getCopyProgress()) .setCopySource(h.getCopySource()) 
.setCopyStatus(Transforms.toDataLakeCopyStatusType(h.getCopyStatus())) .setLeaseDuration(Transforms.toDataLakeLeaseDurationType(h.getLeaseDuration())) .setLeaseState(Transforms.toDataLakeLeaseStateType(h.getLeaseState())) .setLeaseStatus(Transforms.toDataLakeLeaseStatusType(h.getLeaseStatus())) .setClientRequestId(h.getClientRequestId()) .setRequestId(h.getRequestId()) .setVersion(h.getVersion()) .setAcceptRanges(h.getAcceptRanges()) .setDateProperty(h.getDateProperty()) .setServerEncrypted(h.isServerEncrypted()) .setEncryptionKeySha256(h.getEncryptionKeySha256()) .setFileContentMd5(h.getContentMd5()) .setContentCrc64(h.getContentCrc64()) .setErrorCode(h.getErrorCode()); } static BlobQueryOptions toBlobQueryOptions(FileQueryOptions options) { if (options == null) { return null; } if (options.getOutputStream() == null) { return new BlobQueryOptions(options.getExpression()) .setInputSerialization(Transforms.toBlobQuerySerialization(options.getInputSerialization())) .setOutputSerialization(Transforms.toBlobQuerySerialization(options.getOutputSerialization())) .setRequestConditions(Transforms.toBlobRequestConditions(options.getRequestConditions())) .setErrorConsumer(Transforms.toBlobQueryErrorConsumer(options.getErrorConsumer())) .setProgressConsumer(Transforms.toBlobQueryProgressConsumer(options.getProgressConsumer())); } else { return new BlobQueryOptions(options.getExpression(), options.getOutputStream()) .setInputSerialization(Transforms.toBlobQuerySerialization(options.getInputSerialization())) .setOutputSerialization(Transforms.toBlobQuerySerialization(options.getOutputSerialization())) .setRequestConditions(Transforms.toBlobRequestConditions(options.getRequestConditions())) .setErrorConsumer(Transforms.toBlobQueryErrorConsumer(options.getErrorConsumer())) .setProgressConsumer(Transforms.toBlobQueryProgressConsumer(options.getProgressConsumer())); } } static UndeleteBlobContainerOptions toBlobContainerUndeleteOptions(FileSystemUndeleteOptions options) { if 
(options == null) { return null; } return new UndeleteBlobContainerOptions(options.getDeletedFileSystemName(), options.getDeletedFileSystemVersion()).setDestinationContainerName(options.getDestinationFileSystemName()); } static DataLakeServiceProperties toDataLakeServiceProperties(BlobServiceProperties blobProps) { if (blobProps == null) { return null; } return new DataLakeServiceProperties() .setDefaultServiceVersion(blobProps.getDefaultServiceVersion()) .setCors(blobProps.getCors().stream().map(Transforms::toDataLakeCorsRule).collect(Collectors.toList())) .setDeleteRetentionPolicy(toDataLakeRetentionPolicy(blobProps.getDeleteRetentionPolicy())) .setHourMetrics(toDataLakeMetrics(blobProps.getHourMetrics())) .setMinuteMetrics(toDataLakeMetrics(blobProps.getMinuteMetrics())) .setLogging(toDataLakeAnalyticsLogging(blobProps.getLogging())) .setStaticWebsite(toDataLakeStaticWebsite(blobProps.getStaticWebsite())); } static DataLakeStaticWebsite toDataLakeStaticWebsite(StaticWebsite staticWebsite) { if (staticWebsite == null) { return null; } return new DataLakeStaticWebsite() .setDefaultIndexDocumentPath(staticWebsite.getDefaultIndexDocumentPath()) .setEnabled(staticWebsite.isEnabled()) .setErrorDocument404Path(staticWebsite.getErrorDocument404Path()) .setIndexDocument(staticWebsite.getIndexDocument()); } static DataLakeAnalyticsLogging toDataLakeAnalyticsLogging(BlobAnalyticsLogging blobLogging) { if (blobLogging == null) { return null; } return new DataLakeAnalyticsLogging() .setDelete(blobLogging.isDelete()) .setRead(blobLogging.isRead()) .setWrite(blobLogging.isWrite()) .setRetentionPolicy(toDataLakeRetentionPolicy(blobLogging.getRetentionPolicy())) .setVersion(blobLogging.getVersion()); } static DataLakeCorsRule toDataLakeCorsRule(BlobCorsRule blobRule) { if (blobRule == null) { return null; } return new DataLakeCorsRule() .setAllowedHeaders(blobRule.getAllowedHeaders()) .setAllowedMethods(blobRule.getAllowedMethods()) 
.setAllowedOrigins(blobRule.getAllowedOrigins()) .setExposedHeaders(blobRule.getExposedHeaders()) .setMaxAgeInSeconds(blobRule.getMaxAgeInSeconds()); } static DataLakeMetrics toDataLakeMetrics(BlobMetrics blobMetrics) { if (blobMetrics == null) { return null; } return new DataLakeMetrics() .setEnabled(blobMetrics.isEnabled()) .setIncludeApis(blobMetrics.isIncludeApis()) .setVersion(blobMetrics.getVersion()) .setRetentionPolicy(toDataLakeRetentionPolicy(blobMetrics.getRetentionPolicy())); } static DataLakeRetentionPolicy toDataLakeRetentionPolicy(BlobRetentionPolicy blobPolicy) { if (blobPolicy == null) { return null; } return new DataLakeRetentionPolicy() .setDays(blobPolicy.getDays()) .setEnabled(blobPolicy.isEnabled()); } static BlobServiceProperties toBlobServiceProperties(DataLakeServiceProperties datalakeProperties) { if (datalakeProperties == null) { return null; } return new BlobServiceProperties() .setDefaultServiceVersion(datalakeProperties.getDefaultServiceVersion()) .setCors(datalakeProperties.getCors().stream().map(Transforms::toBlobCorsRule).collect(Collectors.toList())) .setDeleteRetentionPolicy(toBlobRetentionPolicy(datalakeProperties.getDeleteRetentionPolicy())) .setHourMetrics(toBlobMetrics(datalakeProperties.getHourMetrics())) .setMinuteMetrics(toBlobMetrics(datalakeProperties.getMinuteMetrics())) .setLogging(toBlobAnalyticsLogging(datalakeProperties.getLogging())) .setStaticWebsite(toBlobStaticWebsite(datalakeProperties.getStaticWebsite())); } static StaticWebsite toBlobStaticWebsite(DataLakeStaticWebsite staticWebsite) { if (staticWebsite == null) { return null; } return new StaticWebsite() .setDefaultIndexDocumentPath(staticWebsite.getDefaultIndexDocumentPath()) .setEnabled(staticWebsite.isEnabled()) .setErrorDocument404Path(staticWebsite.getErrorDocument404Path()) .setIndexDocument(staticWebsite.getIndexDocument()); } static BlobAnalyticsLogging toBlobAnalyticsLogging(DataLakeAnalyticsLogging datalakeLogging) { if (datalakeLogging == null) { 
return null; } return new BlobAnalyticsLogging() .setDelete(datalakeLogging.isDelete()) .setRead(datalakeLogging.isRead()) .setWrite(datalakeLogging.isWrite()) .setRetentionPolicy(toBlobRetentionPolicy(datalakeLogging.getRetentionPolicy())) .setVersion(datalakeLogging.getVersion()); } static BlobCorsRule toBlobCorsRule(DataLakeCorsRule datalakeRule) { if (datalakeRule == null) { return null; } return new BlobCorsRule() .setAllowedHeaders(datalakeRule.getAllowedHeaders()) .setAllowedMethods(datalakeRule.getAllowedMethods()) .setAllowedOrigins(datalakeRule.getAllowedOrigins()) .setExposedHeaders(datalakeRule.getExposedHeaders()) .setMaxAgeInSeconds(datalakeRule.getMaxAgeInSeconds()); } static BlobMetrics toBlobMetrics(DataLakeMetrics datalakeMetrics) { if (datalakeMetrics == null) { return null; } return new BlobMetrics() .setEnabled(datalakeMetrics.isEnabled()) .setIncludeApis(datalakeMetrics.isIncludeApis()) .setVersion(datalakeMetrics.getVersion()) .setRetentionPolicy(toBlobRetentionPolicy(datalakeMetrics.getRetentionPolicy())); } static BlobRetentionPolicy toBlobRetentionPolicy(DataLakeRetentionPolicy datalakePolicy) { if (datalakePolicy == null) { return null; } return new BlobRetentionPolicy() .setDays(datalakePolicy.getDays()) .setEnabled(datalakePolicy.isEnabled()); } static PathDeletedItem toPathDeletedItem(BlobItemInternal blobItem) { if (blobItem == null) { return null; } return new PathDeletedItem(blobItem.getName(), false, blobItem.getDeletionId(), blobItem.getProperties().getDeletedTime(), blobItem.getProperties().getRemainingRetentionDays()); } static PathDeletedItem toPathDeletedItem(BlobPrefix blobPrefix) { return new PathDeletedItem(blobPrefix.getName(), true, null, null, null); } static CustomerProvidedKey toBlobCustomerProvidedKey( com.azure.storage.file.datalake.models.CustomerProvidedKey key) { if (key == null) { return null; } return new CustomerProvidedKey(key.getKey()); } static CpkInfo fromBlobCpkInfo(com.azure.storage.blob.models.CpkInfo 
info) { if (info == null) { return null; } return new CpkInfo() .setEncryptionKey(info.getEncryptionKey()) .setEncryptionAlgorithm(com.azure.storage.file.datalake.models.EncryptionAlgorithmType.fromString( info.getEncryptionAlgorithm().toString())) .setEncryptionKeySha256(info.getEncryptionKeySha256()); } static BlobContainerEncryptionScope toBlobContainerEncryptionScope(FileSystemEncryptionScopeOptions fileSystemEncryptionScope) { if (fileSystemEncryptionScope == null) { return null; } return new BlobContainerEncryptionScope() .setDefaultEncryptionScope(fileSystemEncryptionScope.getDefaultEncryptionScope()) .setEncryptionScopeOverridePrevented(fileSystemEncryptionScope.isEncryptionScopeOverridePrevented()); } }
since this list will never change, so probably can just declare in the constructor or during static initialization, instead of creating one everytime
public List<ResponseTimeoutAndDelays> getTimeoutList() { return Collections.unmodifiableList(Arrays.asList(new ResponseTimeoutAndDelays(Duration.ofMillis(500), 0), new ResponseTimeoutAndDelays(Duration.ofSeconds(5), 1), new ResponseTimeoutAndDelays(Duration.ofSeconds(10), 0))); }
return Collections.unmodifiableList(Arrays.asList(new ResponseTimeoutAndDelays(Duration.ofMillis(500), 0),
public List<ResponseTimeoutAndDelays> getTimeoutList() { return Collections.unmodifiableList(Arrays.asList(new ResponseTimeoutAndDelays(Duration.ofMillis(500), 0), new ResponseTimeoutAndDelays(Duration.ofSeconds(5), 1), new ResponseTimeoutAndDelays(Duration.ofSeconds(10), 0))); }
class HttpTimeoutPolicyControlPlaneHotPath extends HttpTimeoutPolicy { public static final HttpTimeoutPolicy INSTANCE = new HttpTimeoutPolicyControlPlaneHotPath(); private HttpTimeoutPolicyControlPlaneHotPath() { } @Override @Override public boolean isSafeToRetry(HttpMethod httpMethod) { return true; } }
class HttpTimeoutPolicyControlPlaneHotPath extends HttpTimeoutPolicy { public static final HttpTimeoutPolicy INSTANCE = new HttpTimeoutPolicyControlPlaneHotPath(); private HttpTimeoutPolicyControlPlaneHotPath() { timeoutAndDelaysList = getTimeoutList(); } }
The HTTP Default Policy timeout is 65 seconds for all retries in .net. Should we keep it 60 for java?
public void verifyHttpTimeoutPolicyResponseTimeout() throws Exception { GlobalEndpointManager endpointManager = Mockito.mock(GlobalEndpointManager.class); Mockito.doReturn(new URI("http: Mockito.doReturn(Mono.empty()).when(endpointManager).refreshLocationAsync(Mockito.eq(null), Mockito.eq(true)); RetryContext retryContext = new RetryContext(); WebExceptionRetryPolicy retryPolicy = new WebExceptionRetryPolicy(retryContext); Exception exception = ReadTimeoutException.INSTANCE; CosmosException cosmosException = BridgeInternal.createCosmosException(null, HttpConstants.StatusCodes.REQUEST_TIMEOUT, exception); BridgeInternal.setSubStatusCode(cosmosException, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); RxDocumentServiceRequest dsr; dsr = RxDocumentServiceRequest.createFromName(mockDiagnosticsClientContext(), OperationType.QueryPlan, "/dbs/db/colls/col/docs/doc", ResourceType.Document); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofMillis(500)); retryContext.addStatusAndSubStatusCode(408, 10002); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(5)); retryContext.addStatusAndSubStatusCode(408, 10002); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(10)); dsr = RxDocumentServiceRequest.createFromName(mockDiagnosticsClientContext(), OperationType.Read, "/dbs/db/colls/col/docs/doc", ResourceType.Document); retryContext = new RetryContext(); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(65)); retryContext.addStatusAndSubStatusCode(408, 10002); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(65)); retryContext.addStatusAndSubStatusCode(408, 10002); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(65)); }
assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(65));
public void verifyHttpTimeoutPolicyResponseTimeout() throws Exception { GlobalEndpointManager endpointManager = Mockito.mock(GlobalEndpointManager.class); Mockito.doReturn(new URI("http: Mockito.doReturn(Mono.empty()).when(endpointManager).refreshLocationAsync(Mockito.eq(null), Mockito.eq(true)); Exception exception = ReadTimeoutException.INSTANCE; CosmosException cosmosException = BridgeInternal.createCosmosException(null, HttpConstants.StatusCodes.REQUEST_TIMEOUT, exception); BridgeInternal.setSubStatusCode(cosmosException, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); RxDocumentServiceRequest dsr; dsr = RxDocumentServiceRequest.createFromName(mockDiagnosticsClientContext(), OperationType.QueryPlan, "/dbs/db/colls/col/docs/doc", ResourceType.Document); RetryContext retryContext = new RetryContext(); WebExceptionRetryPolicy retryPolicy = new WebExceptionRetryPolicy(retryContext); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofMillis(500)); assertThat(getBackOffTime(retryPolicy)).isEqualTo(0); retryContext.addStatusAndSubStatusCode(408, 10002); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(5)); assertThat(getBackOffTime(retryPolicy)).isEqualTo(1); retryContext.addStatusAndSubStatusCode(408, 10002); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(10)); assertThat(getBackOffTime(retryPolicy)).isEqualTo(0); dsr = RxDocumentServiceRequest.createFromName(mockDiagnosticsClientContext(), OperationType.Read, "/dbs/db/colls/col/docs/doc", ResourceType.Document); retryContext = new RetryContext(); retryPolicy = new WebExceptionRetryPolicy(retryContext); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(60)); assertThat(getBackOffTime(retryPolicy)).isEqualTo(0); retryContext.addStatusAndSubStatusCode(408, 10002); 
retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(60)); assertThat(getBackOffTime(retryPolicy)).isEqualTo(1); retryContext.addStatusAndSubStatusCode(408, 10002); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(60)); assertThat(getBackOffTime(retryPolicy)).isEqualTo(0); dsr = RxDocumentServiceRequest.createFromName(mockDiagnosticsClientContext(), OperationType.Read, "/dbs/db/colls/col/docs/doc", ResourceType.DatabaseAccount); retryContext = new RetryContext(); retryPolicy = new WebExceptionRetryPolicy(retryContext); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(5)); assertThat(getBackOffTime(retryPolicy)).isEqualTo(0); retryContext.addStatusAndSubStatusCode(408, 10002); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(10)); assertThat(getBackOffTime(retryPolicy)).isEqualTo(1); retryContext.addStatusAndSubStatusCode(408, 10002); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(20)); assertThat(getBackOffTime(retryPolicy)).isEqualTo(0); }
class HttpTimeoutPolicyTest { private final static int TIMEOUT = 10000; @Test(groups = "unit") }
class HttpTimeoutPolicyTest { private final static String BACKOFF_TIME_PROPERTY_NAME = "backoffSecondsTimeout"; @Test(groups = "unit") public int getBackOffTime(WebExceptionRetryPolicy retryPolicy) throws Exception { Field field = retryPolicy.getClass().getDeclaredField(BACKOFF_TIME_PROPERTY_NAME); field.setAccessible(true); return (int) field.get(retryPolicy); } }
I thought I saw some discussion around 60s(Java SDK default) or 65s, and we are going to keep use 60s?
public List<ResponseTimeoutAndDelays> getTimeoutList() { return Collections.unmodifiableList(Arrays.asList(new ResponseTimeoutAndDelays(Duration.ofSeconds(65), 0), new ResponseTimeoutAndDelays(Duration.ofSeconds(65), 1), new ResponseTimeoutAndDelays(Duration.ofSeconds(65), 0))); }
return Collections.unmodifiableList(Arrays.asList(new ResponseTimeoutAndDelays(Duration.ofSeconds(65), 0),
public List<ResponseTimeoutAndDelays> getTimeoutList() { return Collections.unmodifiableList(Arrays.asList(new ResponseTimeoutAndDelays(Duration.ofSeconds(60), 0), new ResponseTimeoutAndDelays(Duration.ofSeconds(60), 1), new ResponseTimeoutAndDelays(Duration.ofSeconds(60), 0))); }
class HttpTimeoutPolicyDefault extends HttpTimeoutPolicy { public static final HttpTimeoutPolicy INSTANCE = new HttpTimeoutPolicyDefault(); private HttpTimeoutPolicyDefault() { } @Override @Override public boolean isSafeToRetry(HttpMethod httpMethod) { return httpMethod == HttpMethod.GET; } }
class HttpTimeoutPolicyDefault extends HttpTimeoutPolicy { public static final HttpTimeoutPolicy INSTANCE = new HttpTimeoutPolicyDefault(); private HttpTimeoutPolicyDefault() { timeoutAndDelaysList = getTimeoutList(); } }
since in java, we have used 60s as default, so we should align with it
public void verifyHttpTimeoutPolicyResponseTimeout() throws Exception { GlobalEndpointManager endpointManager = Mockito.mock(GlobalEndpointManager.class); Mockito.doReturn(new URI("http: Mockito.doReturn(Mono.empty()).when(endpointManager).refreshLocationAsync(Mockito.eq(null), Mockito.eq(true)); RetryContext retryContext = new RetryContext(); WebExceptionRetryPolicy retryPolicy = new WebExceptionRetryPolicy(retryContext); Exception exception = ReadTimeoutException.INSTANCE; CosmosException cosmosException = BridgeInternal.createCosmosException(null, HttpConstants.StatusCodes.REQUEST_TIMEOUT, exception); BridgeInternal.setSubStatusCode(cosmosException, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); RxDocumentServiceRequest dsr; dsr = RxDocumentServiceRequest.createFromName(mockDiagnosticsClientContext(), OperationType.QueryPlan, "/dbs/db/colls/col/docs/doc", ResourceType.Document); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofMillis(500)); retryContext.addStatusAndSubStatusCode(408, 10002); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(5)); retryContext.addStatusAndSubStatusCode(408, 10002); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(10)); dsr = RxDocumentServiceRequest.createFromName(mockDiagnosticsClientContext(), OperationType.Read, "/dbs/db/colls/col/docs/doc", ResourceType.Document); retryContext = new RetryContext(); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(65)); retryContext.addStatusAndSubStatusCode(408, 10002); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(65)); retryContext.addStatusAndSubStatusCode(408, 10002); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(65)); }
assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(65));
public void verifyHttpTimeoutPolicyResponseTimeout() throws Exception { GlobalEndpointManager endpointManager = Mockito.mock(GlobalEndpointManager.class); Mockito.doReturn(new URI("http: Mockito.doReturn(Mono.empty()).when(endpointManager).refreshLocationAsync(Mockito.eq(null), Mockito.eq(true)); Exception exception = ReadTimeoutException.INSTANCE; CosmosException cosmosException = BridgeInternal.createCosmosException(null, HttpConstants.StatusCodes.REQUEST_TIMEOUT, exception); BridgeInternal.setSubStatusCode(cosmosException, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); RxDocumentServiceRequest dsr; dsr = RxDocumentServiceRequest.createFromName(mockDiagnosticsClientContext(), OperationType.QueryPlan, "/dbs/db/colls/col/docs/doc", ResourceType.Document); RetryContext retryContext = new RetryContext(); WebExceptionRetryPolicy retryPolicy = new WebExceptionRetryPolicy(retryContext); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofMillis(500)); assertThat(getBackOffTime(retryPolicy)).isEqualTo(0); retryContext.addStatusAndSubStatusCode(408, 10002); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(5)); assertThat(getBackOffTime(retryPolicy)).isEqualTo(1); retryContext.addStatusAndSubStatusCode(408, 10002); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(10)); assertThat(getBackOffTime(retryPolicy)).isEqualTo(0); dsr = RxDocumentServiceRequest.createFromName(mockDiagnosticsClientContext(), OperationType.Read, "/dbs/db/colls/col/docs/doc", ResourceType.Document); retryContext = new RetryContext(); retryPolicy = new WebExceptionRetryPolicy(retryContext); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(60)); assertThat(getBackOffTime(retryPolicy)).isEqualTo(0); retryContext.addStatusAndSubStatusCode(408, 10002); 
retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(60)); assertThat(getBackOffTime(retryPolicy)).isEqualTo(1); retryContext.addStatusAndSubStatusCode(408, 10002); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(60)); assertThat(getBackOffTime(retryPolicy)).isEqualTo(0); dsr = RxDocumentServiceRequest.createFromName(mockDiagnosticsClientContext(), OperationType.Read, "/dbs/db/colls/col/docs/doc", ResourceType.DatabaseAccount); retryContext = new RetryContext(); retryPolicy = new WebExceptionRetryPolicy(retryContext); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(5)); assertThat(getBackOffTime(retryPolicy)).isEqualTo(0); retryContext.addStatusAndSubStatusCode(408, 10002); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(10)); assertThat(getBackOffTime(retryPolicy)).isEqualTo(1); retryContext.addStatusAndSubStatusCode(408, 10002); retryPolicy.onBeforeSendRequest(dsr); assertThat(dsr.getResponseTimeout()).isEqualTo(Duration.ofSeconds(20)); assertThat(getBackOffTime(retryPolicy)).isEqualTo(0); }
class HttpTimeoutPolicyTest { private final static int TIMEOUT = 10000; @Test(groups = "unit") }
class HttpTimeoutPolicyTest { private final static String BACKOFF_TIME_PROPERTY_NAME = "backoffSecondsTimeout"; @Test(groups = "unit") public int getBackOffTime(WebExceptionRetryPolicy retryPolicy) throws Exception { Field field = retryPolicy.getClass().getDeclaredField(BACKOFF_TIME_PROPERTY_NAME); field.setAccessible(true); return (int) field.get(retryPolicy); } }
As discussed previously, please consider using a static list instead initializing a new one each time
public List<ResponseTimeoutAndDelays> getTimeoutList() { return Collections.unmodifiableList(Arrays.asList(new ResponseTimeoutAndDelays(Duration.ofMillis(500), 0), new ResponseTimeoutAndDelays(Duration.ofSeconds(5), 1), new ResponseTimeoutAndDelays(Duration.ofSeconds(10), 0))); }
return Collections.unmodifiableList(Arrays.asList(new ResponseTimeoutAndDelays(Duration.ofMillis(500), 0),
public List<ResponseTimeoutAndDelays> getTimeoutList() { return Collections.unmodifiableList(Arrays.asList(new ResponseTimeoutAndDelays(Duration.ofMillis(500), 0), new ResponseTimeoutAndDelays(Duration.ofSeconds(5), 1), new ResponseTimeoutAndDelays(Duration.ofSeconds(10), 0))); }
class HttpTimeoutPolicyControlPlaneHotPath extends HttpTimeoutPolicy { public static final HttpTimeoutPolicy INSTANCE = new HttpTimeoutPolicyControlPlaneHotPath(); private HttpTimeoutPolicyControlPlaneHotPath() { } @Override @Override public boolean isSafeToRetry(HttpMethod httpMethod) { return true; } }
class HttpTimeoutPolicyControlPlaneHotPath extends HttpTimeoutPolicy { public static final HttpTimeoutPolicy INSTANCE = new HttpTimeoutPolicyControlPlaneHotPath(); private HttpTimeoutPolicyControlPlaneHotPath() { timeoutAndDelaysList = getTimeoutList(); } }
Oh, you can just create a local, or shared constant, anywhere you want and it'll be more performant than using a String constant. So instead of the following in `Constants`: `public static final String X_MS_ENCRYPTION_CONTEXT = "x-ms-encryption-context";` you can do `public static final HttpHeaderName X_MS_ENCRYPTION_CONTEXT = HttpHeaderName.fromString("x-ms-encryption-context");`
static String getEncryptionContext(Response<?> r) { if (r == null) { return null; } return r.getHeaders().getValue("x-ms-encryption-context"); }
return r.getHeaders().getValue("x-ms-encryption-context");
static String getEncryptionContext(Response<?> r) { if (r == null) { return null; } return r.getHeaders().getValue(X_MS_ENCRYPTION_CONTEXT); }
class Transforms { private static final String SERIALIZATION_MESSAGE = String.format("'serialization' must be one of %s, %s, %s or " + "%s.", FileQueryJsonSerialization.class.getSimpleName(), FileQueryDelimitedSerialization.class.getSimpleName(), FileQueryArrowSerialization.class.getSimpleName(), FileQueryParquetSerialization.class.getSimpleName()); private static final long EPOCH_CONVERSION; static { GregorianCalendar unixEpoch = new GregorianCalendar(); unixEpoch.clear(); unixEpoch.set(1970, Calendar.JANUARY, 1, 0, 0, 0); GregorianCalendar windowsEpoch = new GregorianCalendar(); windowsEpoch.clear(); windowsEpoch.set(1601, Calendar.JANUARY, 1, 0, 0, 0); EPOCH_CONVERSION = unixEpoch.getTimeInMillis() - windowsEpoch.getTimeInMillis(); } static com.azure.storage.blob.models.PublicAccessType toBlobPublicAccessType(PublicAccessType fileSystemPublicAccessType) { if (fileSystemPublicAccessType == null) { return null; } return com.azure.storage.blob.models.PublicAccessType.fromString(fileSystemPublicAccessType.toString()); } private static LeaseDurationType toDataLakeLeaseDurationType(com.azure.storage.blob.models.LeaseDurationType blobLeaseDurationType) { if (blobLeaseDurationType == null) { return null; } return LeaseDurationType.fromString(blobLeaseDurationType.toString()); } private static LeaseStateType toDataLakeLeaseStateType(com.azure.storage.blob.models.LeaseStateType blobLeaseStateType) { if (blobLeaseStateType == null) { return null; } return LeaseStateType.fromString(blobLeaseStateType.toString()); } private static LeaseStatusType toDataLakeLeaseStatusType(com.azure.storage.blob.models.LeaseStatusType blobLeaseStatusType) { if (blobLeaseStatusType == null) { return null; } return LeaseStatusType.fromString(blobLeaseStatusType.toString()); } private static PublicAccessType toDataLakePublicAccessType(com.azure.storage.blob.models.PublicAccessType blobPublicAccessType) { if (blobPublicAccessType == null) { return null; } return 
PublicAccessType.fromString(blobPublicAccessType.toString()); } private static CopyStatusType toDataLakeCopyStatusType( com.azure.storage.blob.models.CopyStatusType blobCopyStatus) { if (blobCopyStatus == null) { return null; } return CopyStatusType.fromString(blobCopyStatus.toString()); } private static ArchiveStatus toDataLakeArchiveStatus( com.azure.storage.blob.models.ArchiveStatus blobArchiveStatus) { if (blobArchiveStatus == null) { return null; } return ArchiveStatus.fromString(blobArchiveStatus.toString()); } private static AccessTier toDataLakeAccessTier(com.azure.storage.blob.models.AccessTier blobAccessTier) { if (blobAccessTier == null) { return null; } return AccessTier.fromString(blobAccessTier.toString()); } static FileSystemProperties toFileSystemProperties(BlobContainerProperties blobContainerProperties) { if (blobContainerProperties == null) { return null; } FileSystemProperties fileSystemProperties = new FileSystemProperties(blobContainerProperties.getMetadata(), blobContainerProperties.getETag(), blobContainerProperties.getLastModified(), Transforms.toDataLakeLeaseDurationType(blobContainerProperties.getLeaseDuration()), Transforms.toDataLakeLeaseStateType(blobContainerProperties.getLeaseState()), Transforms.toDataLakeLeaseStatusType(blobContainerProperties.getLeaseStatus()), Transforms.toDataLakePublicAccessType(blobContainerProperties.getBlobPublicAccess()), blobContainerProperties.hasImmutabilityPolicy(), blobContainerProperties.hasLegalHold()); return AccessorUtility.getFileSystemPropertiesAccessor() .setFileSystemProperties(fileSystemProperties, blobContainerProperties.getDefaultEncryptionScope(), blobContainerProperties.isEncryptionScopeOverridePrevented()); } private static BlobContainerListDetails toBlobContainerListDetails(FileSystemListDetails fileSystemListDetails) { if (fileSystemListDetails == null) { return null; } return new BlobContainerListDetails() .setRetrieveMetadata(fileSystemListDetails.getRetrieveMetadata()) 
.setRetrieveDeleted(fileSystemListDetails.getRetrieveDeleted()) .setRetrieveSystemContainers(fileSystemListDetails.getRetrieveSystemFileSystems()); } static ListBlobContainersOptions toListBlobContainersOptions(ListFileSystemsOptions listFileSystemsOptions) { if (listFileSystemsOptions == null) { return null; } return new ListBlobContainersOptions() .setDetails(toBlobContainerListDetails(listFileSystemsOptions.getDetails())) .setMaxResultsPerPage(listFileSystemsOptions.getMaxResultsPerPage()) .setPrefix(listFileSystemsOptions.getPrefix()); } static UserDelegationKey toDataLakeUserDelegationKey(com.azure.storage.blob.models.UserDelegationKey blobUserDelegationKey) { if (blobUserDelegationKey == null) { return null; } return new UserDelegationKey() .setSignedExpiry(blobUserDelegationKey.getSignedExpiry()) .setSignedObjectId(blobUserDelegationKey.getSignedObjectId()) .setSignedTenantId(blobUserDelegationKey.getSignedTenantId()) .setSignedService(blobUserDelegationKey.getSignedService()) .setSignedStart(blobUserDelegationKey.getSignedStart()) .setSignedVersion(blobUserDelegationKey.getSignedVersion()) .setValue(blobUserDelegationKey.getValue()); } static BlobHttpHeaders toBlobHttpHeaders(PathHttpHeaders pathHTTPHeaders) { if (pathHTTPHeaders == null) { return null; } return new BlobHttpHeaders() .setCacheControl(pathHTTPHeaders.getCacheControl()) .setContentDisposition(pathHTTPHeaders.getContentDisposition()) .setContentEncoding(pathHTTPHeaders.getContentEncoding()) .setContentLanguage(pathHTTPHeaders.getContentLanguage()) .setContentType(pathHTTPHeaders.getContentType()) .setContentMd5(pathHTTPHeaders.getContentMd5()); } static BlobInputStreamOptions toBlobInputStreamOptions(DataLakeFileInputStreamOptions options) { if (options == null) { return null; } return new BlobInputStreamOptions() .setBlockSize(options.getBlockSize()) .setRange(toBlobRange(options.getRange())) .setRequestConditions(toBlobRequestConditions(options.getRequestConditions())) 
.setConsistentReadControl(toBlobConsistentReadControl(options.getConsistentReadControl())); } static com.azure.storage.blob.models.ConsistentReadControl toBlobConsistentReadControl( com.azure.storage.file.datalake.models.ConsistentReadControl datalakeConsistentReadControl) { if (datalakeConsistentReadControl == null) { return null; } switch (datalakeConsistentReadControl) { case NONE: return ConsistentReadControl.NONE; case ETAG: return ConsistentReadControl.ETAG; default: throw new IllegalArgumentException("Could not convert ConsistentReadControl"); } } static BlobRange toBlobRange(FileRange fileRange) { if (fileRange == null) { return null; } return new BlobRange(fileRange.getOffset(), fileRange.getCount()); } static com.azure.storage.blob.models.DownloadRetryOptions toBlobDownloadRetryOptions( DownloadRetryOptions dataLakeOptions) { if (dataLakeOptions == null) { return null; } return new com.azure.storage.blob.models.DownloadRetryOptions() .setMaxRetryRequests(dataLakeOptions.getMaxRetryRequests()); } static PathProperties toPathProperties(BlobProperties properties, String encryptionContext) { if (properties == null) { return null; } else { PathProperties pathProperties = new PathProperties(properties.getCreationTime(), properties.getLastModified(), properties.getETag(), properties.getBlobSize(), properties.getContentType(), properties.getContentMd5(), properties.getContentEncoding(), properties.getContentDisposition(), properties.getContentLanguage(), properties.getCacheControl(), Transforms.toDataLakeLeaseStatusType(properties.getLeaseStatus()), Transforms.toDataLakeLeaseStateType(properties.getLeaseState()), Transforms.toDataLakeLeaseDurationType(properties.getLeaseDuration()), properties.getCopyId(), Transforms.toDataLakeCopyStatusType(properties.getCopyStatus()), properties.getCopySource(), properties.getCopyProgress(), properties.getCopyCompletionTime(), properties.getCopyStatusDescription(), properties.isServerEncrypted(), properties.isIncrementalCopy(), 
Transforms.toDataLakeAccessTier(properties.getAccessTier()), Transforms.toDataLakeArchiveStatus(properties.getArchiveStatus()), properties.getEncryptionKeySha256(), properties.getAccessTierChangeTime(), properties.getMetadata(), properties.getExpiresOn()); return AccessorUtility.getPathPropertiesAccessor().setPathProperties(pathProperties, properties.getEncryptionScope(), encryptionContext); } } static FileSystemItem toFileSystemItem(BlobContainerItem blobContainerItem) { if (blobContainerItem == null) { return null; } return new FileSystemItem() .setName(blobContainerItem.getName()) .setDeleted(blobContainerItem.isDeleted()) .setVersion(blobContainerItem.getVersion()) .setMetadata(blobContainerItem.getMetadata()) .setProperties(Transforms.toFileSystemItemProperties(blobContainerItem.getProperties())); } private static FileSystemItemProperties toFileSystemItemProperties( BlobContainerItemProperties blobContainerItemProperties) { if (blobContainerItemProperties == null) { return null; } return new FileSystemItemProperties() .setETag(blobContainerItemProperties.getETag()) .setLastModified(blobContainerItemProperties.getLastModified()) .setLeaseStatus(toDataLakeLeaseStatusType(blobContainerItemProperties.getLeaseStatus())) .setLeaseState(toDataLakeLeaseStateType(blobContainerItemProperties.getLeaseState())) .setLeaseDuration(toDataLakeLeaseDurationType(blobContainerItemProperties.getLeaseDuration())) .setPublicAccess(toDataLakePublicAccessType(blobContainerItemProperties.getPublicAccess())) .setHasLegalHold(blobContainerItemProperties.isHasLegalHold()) .setHasImmutabilityPolicy(blobContainerItemProperties.isHasImmutabilityPolicy()) .setEncryptionScope(blobContainerItemProperties.getDefaultEncryptionScope()) .setEncryptionScopeOverridePrevented(blobContainerItemProperties.isEncryptionScopeOverridePrevented()); } static PathItem toPathItem(Path path) { if (path == null) { return null; } PathItem pathItem = new PathItem(path.getETag(), 
parseDateOrNull(path.getLastModified()), path.getContentLength() == null ? 0 : path.getContentLength(), path.getGroup(), path.isDirectory() != null && path.isDirectory(), path.getName(), path.getOwner(), path.getPermissions(), path.getCreationTime() == null ? null : fromWindowsFileTimeOrNull(Long.parseLong(path.getCreationTime())), path.getExpiryTime() == null ? null : fromWindowsFileTimeOrNull(Long.parseLong(path.getExpiryTime()))); return AccessorUtility.getPathItemAccessor().setPathItemProperties(pathItem, path.getEncryptionScope(), path.getEncryptionContext()); } private static OffsetDateTime parseDateOrNull(String date) { return date == null ? null : OffsetDateTime.parse(date, DateTimeFormatter.RFC_1123_DATE_TIME); } private static OffsetDateTime fromWindowsFileTimeOrNull(long fileTime) { if (fileTime == 0) { return null; } long fileTimeMs = fileTime / 10000; long fileTimeUnixEpoch = fileTimeMs - EPOCH_CONVERSION; return Instant.ofEpochMilli(fileTimeUnixEpoch).atOffset(ZoneOffset.UTC); } static BlobRequestConditions toBlobRequestConditions(DataLakeRequestConditions requestConditions) { if (requestConditions == null) { return null; } return new BlobRequestConditions() .setLeaseId(requestConditions.getLeaseId()) .setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince()) .setIfNoneMatch(requestConditions.getIfNoneMatch()) .setIfMatch(requestConditions.getIfMatch()) .setIfModifiedSince(requestConditions.getIfModifiedSince()); } static FileReadResponse toFileReadResponse(BlobDownloadResponse r) { if (r == null) { return null; } return new FileReadResponse(Transforms.toFileReadAsyncResponse(new BlobDownloadAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), null, r.getDeserializedHeaders()))); } static FileReadAsyncResponse toFileReadAsyncResponse(BlobDownloadAsyncResponse r) { if (r == null) { return null; } return new FileReadAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), r.getValue(), 
Transforms.toPathReadHeaders(r.getDeserializedHeaders(), getEncryptionContext(r))); } private static FileReadHeaders toPathReadHeaders(BlobDownloadHeaders h, String encryptionContext) { if (h == null) { return null; } return new FileReadHeaders() .setLastModified(h.getLastModified()) .setMetadata(h.getMetadata()) .setContentLength(h.getContentLength()) .setContentType(h.getContentType()) .setContentRange(h.getContentRange()) .setETag(h.getETag()) .setContentMd5(h.getContentMd5()) .setContentEncoding(h.getContentEncoding()) .setCacheControl(h.getCacheControl()) .setContentDisposition(h.getContentDisposition()) .setContentLanguage(h.getContentLanguage()) .setCopyCompletionTime(h.getCopyCompletionTime()) .setCopyStatusDescription(h.getCopyStatusDescription()) .setCopyId(h.getCopyId()) .setCopyProgress(h.getCopyProgress()) .setCopySource(h.getCopySource()) .setCopyStatus(Transforms.toDataLakeCopyStatusType(h.getCopyStatus())) .setLeaseDuration(Transforms.toDataLakeLeaseDurationType(h.getLeaseDuration())) .setLeaseState(Transforms.toDataLakeLeaseStateType(h.getLeaseState())) .setLeaseStatus(Transforms.toDataLakeLeaseStatusType(h.getLeaseStatus())) .setClientRequestId(h.getClientRequestId()) .setRequestId(h.getRequestId()) .setVersion(h.getVersion()) .setAcceptRanges(h.getAcceptRanges()) .setDateProperty(h.getDateProperty()) .setServerEncrypted(h.isServerEncrypted()) .setEncryptionKeySha256(h.getEncryptionKeySha256()) .setFileContentMd5(h.getBlobContentMD5()) .setContentCrc64(h.getContentCrc64()) .setErrorCode(h.getErrorCode()) .setEncryptionContext(encryptionContext); } static List<BlobSignedIdentifier> toBlobIdentifierList(List<DataLakeSignedIdentifier> identifiers) { if (identifiers == null) { return null; } List<BlobSignedIdentifier> blobIdentifiers = new ArrayList<>(); for (DataLakeSignedIdentifier identifier : identifiers) { blobIdentifiers.add(Transforms.toBlobIdentifier(identifier)); } return blobIdentifiers; } private static BlobSignedIdentifier 
toBlobIdentifier(DataLakeSignedIdentifier identifier) { if (identifier == null) { return null; } return new BlobSignedIdentifier() .setId(identifier.getId()) .setAccessPolicy(Transforms.toBlobAccessPolicy(identifier.getAccessPolicy())); } private static BlobAccessPolicy toBlobAccessPolicy(DataLakeAccessPolicy accessPolicy) { if (accessPolicy == null) { return null; } return new BlobAccessPolicy() .setExpiresOn(accessPolicy.getExpiresOn()) .setStartsOn(accessPolicy.getStartsOn()) .setPermissions(accessPolicy.getPermissions()); } static FileSystemAccessPolicies toFileSystemAccessPolicies(BlobContainerAccessPolicies accessPolicies) { if (accessPolicies == null) { return null; } return new FileSystemAccessPolicies(Transforms.toDataLakePublicAccessType(accessPolicies.getBlobAccessType()), Transforms.toDataLakeIdentifierList(accessPolicies.getIdentifiers())); } static List<DataLakeSignedIdentifier> toDataLakeIdentifierList(List<BlobSignedIdentifier> identifiers) { if (identifiers == null) { return null; } List<DataLakeSignedIdentifier> dataLakeIdentifiers = new ArrayList<>(); for (BlobSignedIdentifier identifier : identifiers) { dataLakeIdentifiers.add(Transforms.toDataLakeIdentifier(identifier)); } return dataLakeIdentifiers; } private static DataLakeSignedIdentifier toDataLakeIdentifier(BlobSignedIdentifier identifier) { if (identifier == null) { return null; } return new DataLakeSignedIdentifier() .setId(identifier.getId()) .setAccessPolicy(Transforms.toDataLakeAccessPolicy(identifier.getAccessPolicy())); } private static DataLakeAccessPolicy toDataLakeAccessPolicy(BlobAccessPolicy accessPolicy) { if (accessPolicy == null) { return null; } return new DataLakeAccessPolicy() .setExpiresOn(accessPolicy.getExpiresOn()) .setStartsOn(accessPolicy.getStartsOn()) .setPermissions(accessPolicy.getPermissions()); } static BlobQuerySerialization toBlobQuerySerialization(FileQuerySerialization ser) { if (ser == null) { return null; } if (ser instanceof FileQueryJsonSerialization) 
{ FileQueryJsonSerialization jsonSer = (FileQueryJsonSerialization) ser; return new BlobQueryJsonSerialization().setRecordSeparator(jsonSer.getRecordSeparator()); } else if (ser instanceof FileQueryDelimitedSerialization) { FileQueryDelimitedSerialization delSer = (FileQueryDelimitedSerialization) ser; return new BlobQueryDelimitedSerialization() .setColumnSeparator(delSer.getColumnSeparator()) .setEscapeChar(delSer.getEscapeChar()) .setFieldQuote(delSer.getFieldQuote()) .setHeadersPresent(delSer.isHeadersPresent()) .setRecordSeparator(delSer.getRecordSeparator()); } else if (ser instanceof FileQueryArrowSerialization) { FileQueryArrowSerialization arrSer = (FileQueryArrowSerialization) ser; return new BlobQueryArrowSerialization().setSchema(toBlobQueryArrowSchema(arrSer.getSchema())); } else if (ser instanceof FileQueryParquetSerialization) { return new BlobQueryParquetSerialization(); } else { throw new IllegalArgumentException(SERIALIZATION_MESSAGE); } } private static List<BlobQueryArrowField> toBlobQueryArrowSchema(List<FileQueryArrowField> schema) { if (schema == null) { return null; } List<BlobQueryArrowField> blobSchema = new ArrayList<>(schema.size()); for (FileQueryArrowField field : schema) { blobSchema.add(toBlobQueryArrowField(field)); } return blobSchema; } private static BlobQueryArrowField toBlobQueryArrowField(FileQueryArrowField field) { if (field == null) { return null; } return new BlobQueryArrowField(BlobQueryArrowFieldType.fromString(field.getType().toString())) .setName(field.getName()) .setPrecision(field.getPrecision()) .setScale(field.getScale()); } static Consumer<BlobQueryError> toBlobQueryErrorConsumer(Consumer<FileQueryError> er) { if (er == null) { return null; } return error -> er.accept(toFileQueryError(error)); } static Consumer<BlobQueryProgress> toBlobQueryProgressConsumer(Consumer<FileQueryProgress> pr) { if (pr == null) { return null; } return progress -> pr.accept(toFileQueryProgress(progress)); } private static FileQueryError 
// NOTE(review): the file is collapsed onto very long physical lines; this line continues a
// declaration begun on the previous (unseen) line -- the 'private static FileQueryError' return
// type of toFileQueryError. The helpers below map blob-model query error/progress/response types
// to their Data Lake equivalents; each returns null when given null input.
toFileQueryError(BlobQueryError error) { if (error == null) { return null; } return new FileQueryError(error.isFatal(), error.getName(), error.getDescription(), error.getPosition()); } private static FileQueryProgress toFileQueryProgress(BlobQueryProgress progress) { if (progress == null) { return null; } return new FileQueryProgress(progress.getBytesScanned(), progress.getTotalBytes()); } static FileQueryResponse toFileQueryResponse(BlobQueryResponse r) { if (r == null) { return null; } return new FileQueryResponse(Transforms.toFileQueryAsyncResponse(new BlobQueryAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), null, r.getDeserializedHeaders()))); } static FileQueryAsyncResponse toFileQueryAsyncResponse(BlobQueryAsyncResponse r) { if (r == null) { return null; } return new FileQueryAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), r.getValue(), Transforms.toFileQueryHeaders(r.getDeserializedHeaders())); } private static FileQueryHeaders toFileQueryHeaders(BlobQueryHeaders h) { if (h == null) { return null; } return new FileQueryHeaders() .setLastModified(h.getLastModified()) .setMetadata(h.getMetadata()) .setContentLength(h.getContentLength()) .setContentType(h.getContentType()) .setContentRange(h.getContentRange()) .setETag(h.getETag()) .setContentMd5(h.getContentMd5()) .setContentEncoding(h.getContentEncoding()) .setCacheControl(h.getCacheControl()) .setContentDisposition(h.getContentDisposition()) .setContentLanguage(h.getContentLanguage()) .setCopyCompletionTime(h.getCopyCompletionTime()) .setCopyStatusDescription(h.getCopyStatusDescription()) .setCopyId(h.getCopyId()) .setCopyProgress(h.getCopyProgress()) .setCopySource(h.getCopySource()) .setCopyStatus(Transforms.toDataLakeCopyStatusType(h.getCopyStatus())) .setLeaseDuration(Transforms.toDataLakeLeaseDurationType(h.getLeaseDuration())) .setLeaseState(Transforms.toDataLakeLeaseStateType(h.getLeaseState())) .setLeaseStatus(Transforms.toDataLakeLeaseStatusType(h.getLeaseStatus()))
// (toFileQueryHeaders, continued) -- request/response metadata, encryption and checksum headers;
// then toBlobQueryOptions (two BlobQueryOptions constructors depending on whether an output
// stream was supplied) and toBlobContainerUndeleteOptions.
.setClientRequestId(h.getClientRequestId()) .setRequestId(h.getRequestId()) .setVersion(h.getVersion()) .setAcceptRanges(h.getAcceptRanges()) .setDateProperty(h.getDateProperty()) .setServerEncrypted(h.isServerEncrypted()) .setEncryptionKeySha256(h.getEncryptionKeySha256()) .setFileContentMd5(h.getContentMd5()) .setContentCrc64(h.getContentCrc64()) .setErrorCode(h.getErrorCode()); } static BlobQueryOptions toBlobQueryOptions(FileQueryOptions options) { if (options == null) { return null; } if (options.getOutputStream() == null) { return new BlobQueryOptions(options.getExpression()) .setInputSerialization(Transforms.toBlobQuerySerialization(options.getInputSerialization())) .setOutputSerialization(Transforms.toBlobQuerySerialization(options.getOutputSerialization())) .setRequestConditions(Transforms.toBlobRequestConditions(options.getRequestConditions())) .setErrorConsumer(Transforms.toBlobQueryErrorConsumer(options.getErrorConsumer())) .setProgressConsumer(Transforms.toBlobQueryProgressConsumer(options.getProgressConsumer())); } else { return new BlobQueryOptions(options.getExpression(), options.getOutputStream()) .setInputSerialization(Transforms.toBlobQuerySerialization(options.getInputSerialization())) .setOutputSerialization(Transforms.toBlobQuerySerialization(options.getOutputSerialization())) .setRequestConditions(Transforms.toBlobRequestConditions(options.getRequestConditions())) .setErrorConsumer(Transforms.toBlobQueryErrorConsumer(options.getErrorConsumer())) .setProgressConsumer(Transforms.toBlobQueryProgressConsumer(options.getProgressConsumer())); } } static UndeleteBlobContainerOptions toBlobContainerUndeleteOptions(FileSystemUndeleteOptions options) { if (options == null) { return null; } return new UndeleteBlobContainerOptions(options.getDeletedFileSystemName(), options.getDeletedFileSystemVersion()).setDestinationContainerName(options.getDestinationFileSystemName()); } static DataLakeServiceProperties
// (declaration continues below: toDataLakeServiceProperties maps BlobServiceProperties ->
// DataLakeServiceProperties, followed by per-section converters for static website, analytics
// logging, CORS rules and metrics.)
toDataLakeServiceProperties(BlobServiceProperties blobProps) { if (blobProps == null) { return null; } return new DataLakeServiceProperties() .setDefaultServiceVersion(blobProps.getDefaultServiceVersion()) .setCors(blobProps.getCors().stream().map(Transforms::toDataLakeCorsRule).collect(Collectors.toList())) .setDeleteRetentionPolicy(toDataLakeRetentionPolicy(blobProps.getDeleteRetentionPolicy())) .setHourMetrics(toDataLakeMetrics(blobProps.getHourMetrics())) .setMinuteMetrics(toDataLakeMetrics(blobProps.getMinuteMetrics())) .setLogging(toDataLakeAnalyticsLogging(blobProps.getLogging())) .setStaticWebsite(toDataLakeStaticWebsite(blobProps.getStaticWebsite())); } static DataLakeStaticWebsite toDataLakeStaticWebsite(StaticWebsite staticWebsite) { if (staticWebsite == null) { return null; } return new DataLakeStaticWebsite() .setDefaultIndexDocumentPath(staticWebsite.getDefaultIndexDocumentPath()) .setEnabled(staticWebsite.isEnabled()) .setErrorDocument404Path(staticWebsite.getErrorDocument404Path()) .setIndexDocument(staticWebsite.getIndexDocument()); } static DataLakeAnalyticsLogging toDataLakeAnalyticsLogging(BlobAnalyticsLogging blobLogging) { if (blobLogging == null) { return null; } return new DataLakeAnalyticsLogging() .setDelete(blobLogging.isDelete()) .setRead(blobLogging.isRead()) .setWrite(blobLogging.isWrite()) .setRetentionPolicy(toDataLakeRetentionPolicy(blobLogging.getRetentionPolicy())) .setVersion(blobLogging.getVersion()); } static DataLakeCorsRule toDataLakeCorsRule(BlobCorsRule blobRule) { if (blobRule == null) { return null; } return new DataLakeCorsRule() .setAllowedHeaders(blobRule.getAllowedHeaders()) .setAllowedMethods(blobRule.getAllowedMethods()) .setAllowedOrigins(blobRule.getAllowedOrigins()) .setExposedHeaders(blobRule.getExposedHeaders()) .setMaxAgeInSeconds(blobRule.getMaxAgeInSeconds()); } static DataLakeMetrics toDataLakeMetrics(BlobMetrics blobMetrics) { if (blobMetrics == null) { return null; } return new DataLakeMetrics()
// (toDataLakeMetrics, continued) -- then the Data Lake retention-policy converter and the
// reverse DataLake -> blob service-property/static-website/logging converters.
.setEnabled(blobMetrics.isEnabled()) .setIncludeApis(blobMetrics.isIncludeApis()) .setVersion(blobMetrics.getVersion()) .setRetentionPolicy(toDataLakeRetentionPolicy(blobMetrics.getRetentionPolicy())); } static DataLakeRetentionPolicy toDataLakeRetentionPolicy(BlobRetentionPolicy blobPolicy) { if (blobPolicy == null) { return null; } return new DataLakeRetentionPolicy() .setDays(blobPolicy.getDays()) .setEnabled(blobPolicy.isEnabled()); } static BlobServiceProperties toBlobServiceProperties(DataLakeServiceProperties datalakeProperties) { if (datalakeProperties == null) { return null; } return new BlobServiceProperties() .setDefaultServiceVersion(datalakeProperties.getDefaultServiceVersion()) .setCors(datalakeProperties.getCors().stream().map(Transforms::toBlobCorsRule).collect(Collectors.toList())) .setDeleteRetentionPolicy(toBlobRetentionPolicy(datalakeProperties.getDeleteRetentionPolicy())) .setHourMetrics(toBlobMetrics(datalakeProperties.getHourMetrics())) .setMinuteMetrics(toBlobMetrics(datalakeProperties.getMinuteMetrics())) .setLogging(toBlobAnalyticsLogging(datalakeProperties.getLogging())) .setStaticWebsite(toBlobStaticWebsite(datalakeProperties.getStaticWebsite())); } static StaticWebsite toBlobStaticWebsite(DataLakeStaticWebsite staticWebsite) { if (staticWebsite == null) { return null; } return new StaticWebsite() .setDefaultIndexDocumentPath(staticWebsite.getDefaultIndexDocumentPath()) .setEnabled(staticWebsite.isEnabled()) .setErrorDocument404Path(staticWebsite.getErrorDocument404Path()) .setIndexDocument(staticWebsite.getIndexDocument()); } static BlobAnalyticsLogging toBlobAnalyticsLogging(DataLakeAnalyticsLogging datalakeLogging) { if (datalakeLogging == null) { return null; } return new BlobAnalyticsLogging() .setDelete(datalakeLogging.isDelete()) .setRead(datalakeLogging.isRead()) .setWrite(datalakeLogging.isWrite()) .setRetentionPolicy(toBlobRetentionPolicy(datalakeLogging.getRetentionPolicy())) .setVersion(datalakeLogging.getVersion()); } static
// (the dangling 'static' above starts toBlobCorsRule) -- remaining reverse converters to blob
// models: CORS, metrics, retention policy, soft-deleted path items (BlobItemInternal carries a
// deletion id and retention info; BlobPrefix marks a directory-style prefix), customer-provided
// key, and CPK info.
BlobCorsRule toBlobCorsRule(DataLakeCorsRule datalakeRule) { if (datalakeRule == null) { return null; } return new BlobCorsRule() .setAllowedHeaders(datalakeRule.getAllowedHeaders()) .setAllowedMethods(datalakeRule.getAllowedMethods()) .setAllowedOrigins(datalakeRule.getAllowedOrigins()) .setExposedHeaders(datalakeRule.getExposedHeaders()) .setMaxAgeInSeconds(datalakeRule.getMaxAgeInSeconds()); } static BlobMetrics toBlobMetrics(DataLakeMetrics datalakeMetrics) { if (datalakeMetrics == null) { return null; } return new BlobMetrics() .setEnabled(datalakeMetrics.isEnabled()) .setIncludeApis(datalakeMetrics.isIncludeApis()) .setVersion(datalakeMetrics.getVersion()) .setRetentionPolicy(toBlobRetentionPolicy(datalakeMetrics.getRetentionPolicy())); } static BlobRetentionPolicy toBlobRetentionPolicy(DataLakeRetentionPolicy datalakePolicy) { if (datalakePolicy == null) { return null; } return new BlobRetentionPolicy() .setDays(datalakePolicy.getDays()) .setEnabled(datalakePolicy.isEnabled()); } static PathDeletedItem toPathDeletedItem(BlobItemInternal blobItem) { if (blobItem == null) { return null; } return new PathDeletedItem(blobItem.getName(), false, blobItem.getDeletionId(), blobItem.getProperties().getDeletedTime(), blobItem.getProperties().getRemainingRetentionDays()); } static PathDeletedItem toPathDeletedItem(BlobPrefix blobPrefix) { return new PathDeletedItem(blobPrefix.getName(), true, null, null, null); } static CustomerProvidedKey toBlobCustomerProvidedKey( com.azure.storage.file.datalake.models.CustomerProvidedKey key) { if (key == null) { return null; } return new CustomerProvidedKey(key.getKey()); } static CpkInfo fromBlobCpkInfo(com.azure.storage.blob.models.CpkInfo info) { if (info == null) { return null; } return new CpkInfo() .setEncryptionKey(info.getEncryptionKey()) .setEncryptionAlgorithm(com.azure.storage.file.datalake.models.EncryptionAlgorithmType.fromString( info.getEncryptionAlgorithm().toString()))
// (fromBlobCpkInfo, continued) -- then maps FileSystemEncryptionScopeOptions to
// BlobContainerEncryptionScope; the final '}' closes the enclosing class.
.setEncryptionKeySha256(info.getEncryptionKeySha256()); } static BlobContainerEncryptionScope toBlobContainerEncryptionScope(FileSystemEncryptionScopeOptions fileSystemEncryptionScope) { if (fileSystemEncryptionScope == null) { return null; } return new BlobContainerEncryptionScope() .setDefaultEncryptionScope(fileSystemEncryptionScope.getDefaultEncryptionScope()) .setEncryptionScopeOverridePrevented(fileSystemEncryptionScope.isEncryptionScopeOverridePrevented()); } }
// Helper class translating between azure-storage-blob models and azure-storage-file-datalake
// models. SERIALIZATION_MESSAGE is the error text thrown for unsupported query serializations.
// X_MS_ENCRYPTION_CONTEXT caches the 'x-ms-encryption-context' response-header name as an
// azure-core HttpHeaderName, so header lookups avoid repeated string-based processing.
// The static initializer computes EPOCH_CONVERSION, the millisecond offset between the Unix
// (1970-01-01) and Windows (1601-01-01) epochs, used by fromWindowsFileTimeOrNull below.
// NOTE(review): this chunk appears to contain a duplicated 'class Transforms' declaration
// (another copy closes immediately above) -- confirm against the full file.
class Transforms { private static final String SERIALIZATION_MESSAGE = String.format("'serialization' must be one of %s, %s, %s or " + "%s.", FileQueryJsonSerialization.class.getSimpleName(), FileQueryDelimitedSerialization.class.getSimpleName(), FileQueryArrowSerialization.class.getSimpleName(), FileQueryParquetSerialization.class.getSimpleName()); private static final long EPOCH_CONVERSION; public static final HttpHeaderName X_MS_ENCRYPTION_CONTEXT = HttpHeaderName.fromString("x-ms-encryption-context"); static { GregorianCalendar unixEpoch = new GregorianCalendar(); unixEpoch.clear(); unixEpoch.set(1970, Calendar.JANUARY, 1, 0, 0, 0); GregorianCalendar windowsEpoch = new GregorianCalendar(); windowsEpoch.clear(); windowsEpoch.set(1601, Calendar.JANUARY, 1, 0, 0, 0); EPOCH_CONVERSION = unixEpoch.getTimeInMillis() - windowsEpoch.getTimeInMillis(); } static com.azure.storage.blob.models.PublicAccessType toBlobPublicAccessType(PublicAccessType fileSystemPublicAccessType) { if (fileSystemPublicAccessType == null) { return null; } return com.azure.storage.blob.models.PublicAccessType.fromString(fileSystemPublicAccessType.toString()); } private static LeaseDurationType toDataLakeLeaseDurationType(com.azure.storage.blob.models.LeaseDurationType blobLeaseDurationType) { if (blobLeaseDurationType == null) { return null; } return LeaseDurationType.fromString(blobLeaseDurationType.toString()); } private static LeaseStateType toDataLakeLeaseStateType(com.azure.storage.blob.models.LeaseStateType blobLeaseStateType) { if (blobLeaseStateType == null) { return null; } return LeaseStateType.fromString(blobLeaseStateType.toString()); } private static LeaseStatusType toDataLakeLeaseStatusType(com.azure.storage.blob.models.LeaseStatusType blobLeaseStatusType) { if (blobLeaseStatusType == null) { return null; } return LeaseStatusType.fromString(blobLeaseStatusType.toString()); } private static PublicAccessType toDataLakePublicAccessType(com.azure.storage.blob.models.PublicAccessType
// (toDataLakePublicAccessType parameter list, continued) -- then copy-status, archive-status
// and access-tier enum conversions, and BlobContainerProperties -> FileSystemProperties
// (encryption-scope fields attached via the AccessorUtility accessor).
blobPublicAccessType) { if (blobPublicAccessType == null) { return null; } return PublicAccessType.fromString(blobPublicAccessType.toString()); } private static CopyStatusType toDataLakeCopyStatusType( com.azure.storage.blob.models.CopyStatusType blobCopyStatus) { if (blobCopyStatus == null) { return null; } return CopyStatusType.fromString(blobCopyStatus.toString()); } private static ArchiveStatus toDataLakeArchiveStatus( com.azure.storage.blob.models.ArchiveStatus blobArchiveStatus) { if (blobArchiveStatus == null) { return null; } return ArchiveStatus.fromString(blobArchiveStatus.toString()); } private static AccessTier toDataLakeAccessTier(com.azure.storage.blob.models.AccessTier blobAccessTier) { if (blobAccessTier == null) { return null; } return AccessTier.fromString(blobAccessTier.toString()); } static FileSystemProperties toFileSystemProperties(BlobContainerProperties blobContainerProperties) { if (blobContainerProperties == null) { return null; } FileSystemProperties fileSystemProperties = new FileSystemProperties(blobContainerProperties.getMetadata(), blobContainerProperties.getETag(), blobContainerProperties.getLastModified(), Transforms.toDataLakeLeaseDurationType(blobContainerProperties.getLeaseDuration()), Transforms.toDataLakeLeaseStateType(blobContainerProperties.getLeaseState()), Transforms.toDataLakeLeaseStatusType(blobContainerProperties.getLeaseStatus()), Transforms.toDataLakePublicAccessType(blobContainerProperties.getBlobPublicAccess()), blobContainerProperties.hasImmutabilityPolicy(), blobContainerProperties.hasLegalHold()); return AccessorUtility.getFileSystemPropertiesAccessor() .setFileSystemProperties(fileSystemProperties, blobContainerProperties.getDefaultEncryptionScope(), blobContainerProperties.isEncryptionScopeOverridePrevented()); } private static BlobContainerListDetails toBlobContainerListDetails(FileSystemListDetails fileSystemListDetails) { if (fileSystemListDetails == null) { return null; } return new
// (the 'return new' above completes in toBlobContainerListDetails) -- then container-list
// options, user-delegation key, HTTP-header and input-stream option converters.
BlobContainerListDetails() .setRetrieveMetadata(fileSystemListDetails.getRetrieveMetadata()) .setRetrieveDeleted(fileSystemListDetails.getRetrieveDeleted()) .setRetrieveSystemContainers(fileSystemListDetails.getRetrieveSystemFileSystems()); } static ListBlobContainersOptions toListBlobContainersOptions(ListFileSystemsOptions listFileSystemsOptions) { if (listFileSystemsOptions == null) { return null; } return new ListBlobContainersOptions() .setDetails(toBlobContainerListDetails(listFileSystemsOptions.getDetails())) .setMaxResultsPerPage(listFileSystemsOptions.getMaxResultsPerPage()) .setPrefix(listFileSystemsOptions.getPrefix()); } static UserDelegationKey toDataLakeUserDelegationKey(com.azure.storage.blob.models.UserDelegationKey blobUserDelegationKey) { if (blobUserDelegationKey == null) { return null; } return new UserDelegationKey() .setSignedExpiry(blobUserDelegationKey.getSignedExpiry()) .setSignedObjectId(blobUserDelegationKey.getSignedObjectId()) .setSignedTenantId(blobUserDelegationKey.getSignedTenantId()) .setSignedService(blobUserDelegationKey.getSignedService()) .setSignedStart(blobUserDelegationKey.getSignedStart()) .setSignedVersion(blobUserDelegationKey.getSignedVersion()) .setValue(blobUserDelegationKey.getValue()); } static BlobHttpHeaders toBlobHttpHeaders(PathHttpHeaders pathHTTPHeaders) { if (pathHTTPHeaders == null) { return null; } return new BlobHttpHeaders() .setCacheControl(pathHTTPHeaders.getCacheControl()) .setContentDisposition(pathHTTPHeaders.getContentDisposition()) .setContentEncoding(pathHTTPHeaders.getContentEncoding()) .setContentLanguage(pathHTTPHeaders.getContentLanguage()) .setContentType(pathHTTPHeaders.getContentType()) .setContentMd5(pathHTTPHeaders.getContentMd5()); } static BlobInputStreamOptions toBlobInputStreamOptions(DataLakeFileInputStreamOptions options) { if (options == null) { return null; } return new BlobInputStreamOptions() .setBlockSize(options.getBlockSize()) .setRange(toBlobRange(options.getRange()))
// (toBlobInputStreamOptions, continued) -- then consistent-read-control (throws
// IllegalArgumentException for unmapped values), range, download-retry and PathProperties
// converters; toPathProperties(BlobProperties) delegates with a null encryption context.
.setRequestConditions(toBlobRequestConditions(options.getRequestConditions())) .setConsistentReadControl(toBlobConsistentReadControl(options.getConsistentReadControl())); } static com.azure.storage.blob.models.ConsistentReadControl toBlobConsistentReadControl( com.azure.storage.file.datalake.models.ConsistentReadControl datalakeConsistentReadControl) { if (datalakeConsistentReadControl == null) { return null; } switch (datalakeConsistentReadControl) { case NONE: return ConsistentReadControl.NONE; case ETAG: return ConsistentReadControl.ETAG; default: throw new IllegalArgumentException("Could not convert ConsistentReadControl"); } } static BlobRange toBlobRange(FileRange fileRange) { if (fileRange == null) { return null; } return new BlobRange(fileRange.getOffset(), fileRange.getCount()); } static com.azure.storage.blob.models.DownloadRetryOptions toBlobDownloadRetryOptions( DownloadRetryOptions dataLakeOptions) { if (dataLakeOptions == null) { return null; } return new com.azure.storage.blob.models.DownloadRetryOptions() .setMaxRetryRequests(dataLakeOptions.getMaxRetryRequests()); } static PathProperties toPathProperties(BlobProperties properties) { return toPathProperties(properties, null); } static PathProperties toPathProperties(BlobProperties properties, String encryptionContext) { if (properties == null) { return null; } else { PathProperties pathProperties = new PathProperties(properties.getCreationTime(), properties.getLastModified(), properties.getETag(), properties.getBlobSize(), properties.getContentType(), properties.getContentMd5(), properties.getContentEncoding(), properties.getContentDisposition(), properties.getContentLanguage(), properties.getCacheControl(), Transforms.toDataLakeLeaseStatusType(properties.getLeaseStatus()), Transforms.toDataLakeLeaseStateType(properties.getLeaseState()), Transforms.toDataLakeLeaseDurationType(properties.getLeaseDuration()), properties.getCopyId(), Transforms.toDataLakeCopyStatusType(properties.getCopyStatus()),
// (toPathProperties constructor-argument list, continued) -- copy, encryption and tier
// metadata; encryption scope and context are attached through AccessorUtility. Then the
// FileSystemItem/FileSystemItemProperties converters.
properties.getCopySource(), properties.getCopyProgress(), properties.getCopyCompletionTime(), properties.getCopyStatusDescription(), properties.isServerEncrypted(), properties.isIncrementalCopy(), Transforms.toDataLakeAccessTier(properties.getAccessTier()), Transforms.toDataLakeArchiveStatus(properties.getArchiveStatus()), properties.getEncryptionKeySha256(), properties.getAccessTierChangeTime(), properties.getMetadata(), properties.getExpiresOn()); return AccessorUtility.getPathPropertiesAccessor().setPathProperties(pathProperties, properties.getEncryptionScope(), encryptionContext); } } static FileSystemItem toFileSystemItem(BlobContainerItem blobContainerItem) { if (blobContainerItem == null) { return null; } return new FileSystemItem() .setName(blobContainerItem.getName()) .setDeleted(blobContainerItem.isDeleted()) .setVersion(blobContainerItem.getVersion()) .setMetadata(blobContainerItem.getMetadata()) .setProperties(Transforms.toFileSystemItemProperties(blobContainerItem.getProperties())); } private static FileSystemItemProperties toFileSystemItemProperties( BlobContainerItemProperties blobContainerItemProperties) { if (blobContainerItemProperties == null) { return null; } return new FileSystemItemProperties() .setETag(blobContainerItemProperties.getETag()) .setLastModified(blobContainerItemProperties.getLastModified()) .setLeaseStatus(toDataLakeLeaseStatusType(blobContainerItemProperties.getLeaseStatus())) .setLeaseState(toDataLakeLeaseStateType(blobContainerItemProperties.getLeaseState())) .setLeaseDuration(toDataLakeLeaseDurationType(blobContainerItemProperties.getLeaseDuration())) .setPublicAccess(toDataLakePublicAccessType(blobContainerItemProperties.getPublicAccess())) .setHasLegalHold(blobContainerItemProperties.isHasLegalHold()) .setHasImmutabilityPolicy(blobContainerItemProperties.isHasImmutabilityPolicy()) .setEncryptionScope(blobContainerItemProperties.getDefaultEncryptionScope())
// (toFileSystemItemProperties, continued) -- then toPathItem (Windows FILETIME creation/expiry
// parsed via fromWindowsFileTimeOrNull, which uses EPOCH_CONVERSION; zero FILETIME means
// "unset" and maps to null), RFC-1123 date parsing, request conditions, and read responses.
.setEncryptionScopeOverridePrevented(blobContainerItemProperties.isEncryptionScopeOverridePrevented()); } static PathItem toPathItem(Path path) { if (path == null) { return null; } PathItem pathItem = new PathItem(path.getETag(), parseDateOrNull(path.getLastModified()), path.getContentLength() == null ? 0 : path.getContentLength(), path.getGroup(), path.isDirectory() != null && path.isDirectory(), path.getName(), path.getOwner(), path.getPermissions(), path.getCreationTime() == null ? null : fromWindowsFileTimeOrNull(Long.parseLong(path.getCreationTime())), path.getExpiryTime() == null ? null : fromWindowsFileTimeOrNull(Long.parseLong(path.getExpiryTime()))); return AccessorUtility.getPathItemAccessor().setPathItemProperties(pathItem, path.getEncryptionScope(), path.getEncryptionContext()); } private static OffsetDateTime parseDateOrNull(String date) { return date == null ? null : OffsetDateTime.parse(date, DateTimeFormatter.RFC_1123_DATE_TIME); } private static OffsetDateTime fromWindowsFileTimeOrNull(long fileTime) { if (fileTime == 0) { return null; } long fileTimeMs = fileTime / 10000; long fileTimeUnixEpoch = fileTimeMs - EPOCH_CONVERSION; return Instant.ofEpochMilli(fileTimeUnixEpoch).atOffset(ZoneOffset.UTC); } static BlobRequestConditions toBlobRequestConditions(DataLakeRequestConditions requestConditions) { if (requestConditions == null) { return null; } return new BlobRequestConditions() .setLeaseId(requestConditions.getLeaseId()) .setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince()) .setIfNoneMatch(requestConditions.getIfNoneMatch()) .setIfMatch(requestConditions.getIfMatch()) .setIfModifiedSince(requestConditions.getIfModifiedSince()); } static FileReadResponse toFileReadResponse(BlobDownloadResponse r) { if (r == null) { return null; } return new FileReadResponse(Transforms.toFileReadAsyncResponse(new BlobDownloadAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), null, r.getDeserializedHeaders()))); } static
// (the dangling 'static' above starts toFileReadAsyncResponse, which pulls the encryption
// context off the raw response headers via getEncryptionContext -- presumably the
// 'x-ms-encryption-context' header cached in X_MS_ENCRYPTION_CONTEXT; the method body is not
// visible in this chunk, confirm it uses the HttpHeaderName constant.)
FileReadAsyncResponse toFileReadAsyncResponse(BlobDownloadAsyncResponse r) { if (r == null) { return null; } return new FileReadAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), r.getValue(), Transforms.toPathReadHeaders(r.getDeserializedHeaders(), getEncryptionContext(r))); } private static FileReadHeaders toPathReadHeaders(BlobDownloadHeaders h, String encryptionContext) { if (h == null) { return null; } return new FileReadHeaders() .setLastModified(h.getLastModified()) .setMetadata(h.getMetadata()) .setContentLength(h.getContentLength()) .setContentType(h.getContentType()) .setContentRange(h.getContentRange()) .setETag(h.getETag()) .setContentMd5(h.getContentMd5()) .setContentEncoding(h.getContentEncoding()) .setCacheControl(h.getCacheControl()) .setContentDisposition(h.getContentDisposition()) .setContentLanguage(h.getContentLanguage()) .setCopyCompletionTime(h.getCopyCompletionTime()) .setCopyStatusDescription(h.getCopyStatusDescription()) .setCopyId(h.getCopyId()) .setCopyProgress(h.getCopyProgress()) .setCopySource(h.getCopySource()) .setCopyStatus(Transforms.toDataLakeCopyStatusType(h.getCopyStatus())) .setLeaseDuration(Transforms.toDataLakeLeaseDurationType(h.getLeaseDuration())) .setLeaseState(Transforms.toDataLakeLeaseStateType(h.getLeaseState())) .setLeaseStatus(Transforms.toDataLakeLeaseStatusType(h.getLeaseStatus())) .setClientRequestId(h.getClientRequestId()) .setRequestId(h.getRequestId()) .setVersion(h.getVersion()) .setAcceptRanges(h.getAcceptRanges()) .setDateProperty(h.getDateProperty()) .setServerEncrypted(h.isServerEncrypted()) .setEncryptionKeySha256(h.getEncryptionKeySha256()) .setFileContentMd5(h.getBlobContentMD5()) .setContentCrc64(h.getContentCrc64()) .setErrorCode(h.getErrorCode()) .setCreationTime(h.getCreationTime()) .setEncryptionContext(encryptionContext); } static List<BlobSignedIdentifier> toBlobIdentifierList(List<DataLakeSignedIdentifier> identifiers) { if (identifiers == null) { return null; }
// (toBlobIdentifierList, continued) -- signed-identifier and access-policy conversions in both
// directions, plus BlobContainerAccessPolicies -> FileSystemAccessPolicies.
List<BlobSignedIdentifier> blobIdentifiers = new ArrayList<>(); for (DataLakeSignedIdentifier identifier : identifiers) { blobIdentifiers.add(Transforms.toBlobIdentifier(identifier)); } return blobIdentifiers; } private static BlobSignedIdentifier toBlobIdentifier(DataLakeSignedIdentifier identifier) { if (identifier == null) { return null; } return new BlobSignedIdentifier() .setId(identifier.getId()) .setAccessPolicy(Transforms.toBlobAccessPolicy(identifier.getAccessPolicy())); } private static BlobAccessPolicy toBlobAccessPolicy(DataLakeAccessPolicy accessPolicy) { if (accessPolicy == null) { return null; } return new BlobAccessPolicy() .setExpiresOn(accessPolicy.getExpiresOn()) .setStartsOn(accessPolicy.getStartsOn()) .setPermissions(accessPolicy.getPermissions()); } static FileSystemAccessPolicies toFileSystemAccessPolicies(BlobContainerAccessPolicies accessPolicies) { if (accessPolicies == null) { return null; } return new FileSystemAccessPolicies(Transforms.toDataLakePublicAccessType(accessPolicies.getBlobAccessType()), Transforms.toDataLakeIdentifierList(accessPolicies.getIdentifiers())); } static List<DataLakeSignedIdentifier> toDataLakeIdentifierList(List<BlobSignedIdentifier> identifiers) { if (identifiers == null) { return null; } List<DataLakeSignedIdentifier> dataLakeIdentifiers = new ArrayList<>(); for (BlobSignedIdentifier identifier : identifiers) { dataLakeIdentifiers.add(Transforms.toDataLakeIdentifier(identifier)); } return dataLakeIdentifiers; } private static DataLakeSignedIdentifier toDataLakeIdentifier(BlobSignedIdentifier identifier) { if (identifier == null) { return null; } return new DataLakeSignedIdentifier() .setId(identifier.getId()) .setAccessPolicy(Transforms.toDataLakeAccessPolicy(identifier.getAccessPolicy())); } private static DataLakeAccessPolicy toDataLakeAccessPolicy(BlobAccessPolicy accessPolicy) { if (accessPolicy == null) { return null; } return new DataLakeAccessPolicy() .setExpiresOn(accessPolicy.getExpiresOn())
// (toDataLakeAccessPolicy, continued) -- then query-serialization converters: JSON, delimited,
// Arrow and Parquet are supported; any other subtype throws IllegalArgumentException with
// SERIALIZATION_MESSAGE. Followed by Arrow schema/field mapping and the error-consumer adapter.
.setStartsOn(accessPolicy.getStartsOn()) .setPermissions(accessPolicy.getPermissions()); } static BlobQuerySerialization toBlobQuerySerialization(FileQuerySerialization ser) { if (ser == null) { return null; } if (ser instanceof FileQueryJsonSerialization) { FileQueryJsonSerialization jsonSer = (FileQueryJsonSerialization) ser; return new BlobQueryJsonSerialization().setRecordSeparator(jsonSer.getRecordSeparator()); } else if (ser instanceof FileQueryDelimitedSerialization) { FileQueryDelimitedSerialization delSer = (FileQueryDelimitedSerialization) ser; return new BlobQueryDelimitedSerialization() .setColumnSeparator(delSer.getColumnSeparator()) .setEscapeChar(delSer.getEscapeChar()) .setFieldQuote(delSer.getFieldQuote()) .setHeadersPresent(delSer.isHeadersPresent()) .setRecordSeparator(delSer.getRecordSeparator()); } else if (ser instanceof FileQueryArrowSerialization) { FileQueryArrowSerialization arrSer = (FileQueryArrowSerialization) ser; return new BlobQueryArrowSerialization().setSchema(toBlobQueryArrowSchema(arrSer.getSchema())); } else if (ser instanceof FileQueryParquetSerialization) { return new BlobQueryParquetSerialization(); } else { throw new IllegalArgumentException(SERIALIZATION_MESSAGE); } } private static List<BlobQueryArrowField> toBlobQueryArrowSchema(List<FileQueryArrowField> schema) { if (schema == null) { return null; } List<BlobQueryArrowField> blobSchema = new ArrayList<>(schema.size()); for (FileQueryArrowField field : schema) { blobSchema.add(toBlobQueryArrowField(field)); } return blobSchema; } private static BlobQueryArrowField toBlobQueryArrowField(FileQueryArrowField field) { if (field == null) { return null; } return new BlobQueryArrowField(BlobQueryArrowFieldType.fromString(field.getType().toString())) .setName(field.getName()) .setPrecision(field.getPrecision()) .setScale(field.getScale()); } static Consumer<BlobQueryError> toBlobQueryErrorConsumer(Consumer<FileQueryError> er) { if (er == null) { return null; } return error ->
// (the lambda above completes here: error -> er.accept(toFileQueryError(error))) -- then the
// progress adapter and query error/progress/response/header conversions.
er.accept(toFileQueryError(error)); } static Consumer<BlobQueryProgress> toBlobQueryProgressConsumer(Consumer<FileQueryProgress> pr) { if (pr == null) { return null; } return progress -> pr.accept(toFileQueryProgress(progress)); } private static FileQueryError toFileQueryError(BlobQueryError error) { if (error == null) { return null; } return new FileQueryError(error.isFatal(), error.getName(), error.getDescription(), error.getPosition()); } private static FileQueryProgress toFileQueryProgress(BlobQueryProgress progress) { if (progress == null) { return null; } return new FileQueryProgress(progress.getBytesScanned(), progress.getTotalBytes()); } static FileQueryResponse toFileQueryResponse(BlobQueryResponse r) { if (r == null) { return null; } return new FileQueryResponse(Transforms.toFileQueryAsyncResponse(new BlobQueryAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), null, r.getDeserializedHeaders()))); } static FileQueryAsyncResponse toFileQueryAsyncResponse(BlobQueryAsyncResponse r) { if (r == null) { return null; } return new FileQueryAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), r.getValue(), Transforms.toFileQueryHeaders(r.getDeserializedHeaders())); } private static FileQueryHeaders toFileQueryHeaders(BlobQueryHeaders h) { if (h == null) { return null; } return new FileQueryHeaders() .setLastModified(h.getLastModified()) .setMetadata(h.getMetadata()) .setContentLength(h.getContentLength()) .setContentType(h.getContentType()) .setContentRange(h.getContentRange()) .setETag(h.getETag()) .setContentMd5(h.getContentMd5()) .setContentEncoding(h.getContentEncoding()) .setCacheControl(h.getCacheControl()) .setContentDisposition(h.getContentDisposition()) .setContentLanguage(h.getContentLanguage()) .setCopyCompletionTime(h.getCopyCompletionTime()) .setCopyStatusDescription(h.getCopyStatusDescription()) .setCopyId(h.getCopyId()) .setCopyProgress(h.getCopyProgress()) .setCopySource(h.getCopySource())
// (toFileQueryHeaders, continued) -- lease, request-metadata, encryption and checksum headers;
// then toBlobQueryOptions (stream vs. non-stream constructor) and undelete options.
.setCopyStatus(Transforms.toDataLakeCopyStatusType(h.getCopyStatus())) .setLeaseDuration(Transforms.toDataLakeLeaseDurationType(h.getLeaseDuration())) .setLeaseState(Transforms.toDataLakeLeaseStateType(h.getLeaseState())) .setLeaseStatus(Transforms.toDataLakeLeaseStatusType(h.getLeaseStatus())) .setClientRequestId(h.getClientRequestId()) .setRequestId(h.getRequestId()) .setVersion(h.getVersion()) .setAcceptRanges(h.getAcceptRanges()) .setDateProperty(h.getDateProperty()) .setServerEncrypted(h.isServerEncrypted()) .setEncryptionKeySha256(h.getEncryptionKeySha256()) .setFileContentMd5(h.getContentMd5()) .setContentCrc64(h.getContentCrc64()) .setErrorCode(h.getErrorCode()); } static BlobQueryOptions toBlobQueryOptions(FileQueryOptions options) { if (options == null) { return null; } if (options.getOutputStream() == null) { return new BlobQueryOptions(options.getExpression()) .setInputSerialization(Transforms.toBlobQuerySerialization(options.getInputSerialization())) .setOutputSerialization(Transforms.toBlobQuerySerialization(options.getOutputSerialization())) .setRequestConditions(Transforms.toBlobRequestConditions(options.getRequestConditions())) .setErrorConsumer(Transforms.toBlobQueryErrorConsumer(options.getErrorConsumer())) .setProgressConsumer(Transforms.toBlobQueryProgressConsumer(options.getProgressConsumer())); } else { return new BlobQueryOptions(options.getExpression(), options.getOutputStream()) .setInputSerialization(Transforms.toBlobQuerySerialization(options.getInputSerialization())) .setOutputSerialization(Transforms.toBlobQuerySerialization(options.getOutputSerialization())) .setRequestConditions(Transforms.toBlobRequestConditions(options.getRequestConditions())) .setErrorConsumer(Transforms.toBlobQueryErrorConsumer(options.getErrorConsumer())) .setProgressConsumer(Transforms.toBlobQueryProgressConsumer(options.getProgressConsumer())); } } static UndeleteBlobContainerOptions toBlobContainerUndeleteOptions(FileSystemUndeleteOptions options) { if
// (the dangling 'if' above belongs to toBlobContainerUndeleteOptions) -- then service-property,
// static-website, logging and CORS converters (blob -> Data Lake direction).
(options == null) { return null; } return new UndeleteBlobContainerOptions(options.getDeletedFileSystemName(), options.getDeletedFileSystemVersion()).setDestinationContainerName(options.getDestinationFileSystemName()); } static DataLakeServiceProperties toDataLakeServiceProperties(BlobServiceProperties blobProps) { if (blobProps == null) { return null; } return new DataLakeServiceProperties() .setDefaultServiceVersion(blobProps.getDefaultServiceVersion()) .setCors(blobProps.getCors().stream().map(Transforms::toDataLakeCorsRule).collect(Collectors.toList())) .setDeleteRetentionPolicy(toDataLakeRetentionPolicy(blobProps.getDeleteRetentionPolicy())) .setHourMetrics(toDataLakeMetrics(blobProps.getHourMetrics())) .setMinuteMetrics(toDataLakeMetrics(blobProps.getMinuteMetrics())) .setLogging(toDataLakeAnalyticsLogging(blobProps.getLogging())) .setStaticWebsite(toDataLakeStaticWebsite(blobProps.getStaticWebsite())); } static DataLakeStaticWebsite toDataLakeStaticWebsite(StaticWebsite staticWebsite) { if (staticWebsite == null) { return null; } return new DataLakeStaticWebsite() .setDefaultIndexDocumentPath(staticWebsite.getDefaultIndexDocumentPath()) .setEnabled(staticWebsite.isEnabled()) .setErrorDocument404Path(staticWebsite.getErrorDocument404Path()) .setIndexDocument(staticWebsite.getIndexDocument()); } static DataLakeAnalyticsLogging toDataLakeAnalyticsLogging(BlobAnalyticsLogging blobLogging) { if (blobLogging == null) { return null; } return new DataLakeAnalyticsLogging() .setDelete(blobLogging.isDelete()) .setRead(blobLogging.isRead()) .setWrite(blobLogging.isWrite()) .setRetentionPolicy(toDataLakeRetentionPolicy(blobLogging.getRetentionPolicy())) .setVersion(blobLogging.getVersion()); } static DataLakeCorsRule toDataLakeCorsRule(BlobCorsRule blobRule) { if (blobRule == null) { return null; } return new DataLakeCorsRule() .setAllowedHeaders(blobRule.getAllowedHeaders()) .setAllowedMethods(blobRule.getAllowedMethods())
// (toDataLakeCorsRule, continued) -- then metrics/retention converters and the reverse
// DataLake -> blob service-property, static-website and logging converters.
.setAllowedOrigins(blobRule.getAllowedOrigins()) .setExposedHeaders(blobRule.getExposedHeaders()) .setMaxAgeInSeconds(blobRule.getMaxAgeInSeconds()); } static DataLakeMetrics toDataLakeMetrics(BlobMetrics blobMetrics) { if (blobMetrics == null) { return null; } return new DataLakeMetrics() .setEnabled(blobMetrics.isEnabled()) .setIncludeApis(blobMetrics.isIncludeApis()) .setVersion(blobMetrics.getVersion()) .setRetentionPolicy(toDataLakeRetentionPolicy(blobMetrics.getRetentionPolicy())); } static DataLakeRetentionPolicy toDataLakeRetentionPolicy(BlobRetentionPolicy blobPolicy) { if (blobPolicy == null) { return null; } return new DataLakeRetentionPolicy() .setDays(blobPolicy.getDays()) .setEnabled(blobPolicy.isEnabled()); } static BlobServiceProperties toBlobServiceProperties(DataLakeServiceProperties datalakeProperties) { if (datalakeProperties == null) { return null; } return new BlobServiceProperties() .setDefaultServiceVersion(datalakeProperties.getDefaultServiceVersion()) .setCors(datalakeProperties.getCors().stream().map(Transforms::toBlobCorsRule).collect(Collectors.toList())) .setDeleteRetentionPolicy(toBlobRetentionPolicy(datalakeProperties.getDeleteRetentionPolicy())) .setHourMetrics(toBlobMetrics(datalakeProperties.getHourMetrics())) .setMinuteMetrics(toBlobMetrics(datalakeProperties.getMinuteMetrics())) .setLogging(toBlobAnalyticsLogging(datalakeProperties.getLogging())) .setStaticWebsite(toBlobStaticWebsite(datalakeProperties.getStaticWebsite())); } static StaticWebsite toBlobStaticWebsite(DataLakeStaticWebsite staticWebsite) { if (staticWebsite == null) { return null; } return new StaticWebsite() .setDefaultIndexDocumentPath(staticWebsite.getDefaultIndexDocumentPath()) .setEnabled(staticWebsite.isEnabled()) .setErrorDocument404Path(staticWebsite.getErrorDocument404Path()) .setIndexDocument(staticWebsite.getIndexDocument()); } static BlobAnalyticsLogging toBlobAnalyticsLogging(DataLakeAnalyticsLogging datalakeLogging) { if (datalakeLogging == null) {
// (toBlobAnalyticsLogging, continued) -- then CORS/metrics/retention reverse converters,
// soft-deleted path items and customer-provided key. The trailing 'fromBlobCpkInfo' signature
// is cut off at this chunk boundary and continues on the next line of the file.
return null; } return new BlobAnalyticsLogging() .setDelete(datalakeLogging.isDelete()) .setRead(datalakeLogging.isRead()) .setWrite(datalakeLogging.isWrite()) .setRetentionPolicy(toBlobRetentionPolicy(datalakeLogging.getRetentionPolicy())) .setVersion(datalakeLogging.getVersion()); } static BlobCorsRule toBlobCorsRule(DataLakeCorsRule datalakeRule) { if (datalakeRule == null) { return null; } return new BlobCorsRule() .setAllowedHeaders(datalakeRule.getAllowedHeaders()) .setAllowedMethods(datalakeRule.getAllowedMethods()) .setAllowedOrigins(datalakeRule.getAllowedOrigins()) .setExposedHeaders(datalakeRule.getExposedHeaders()) .setMaxAgeInSeconds(datalakeRule.getMaxAgeInSeconds()); } static BlobMetrics toBlobMetrics(DataLakeMetrics datalakeMetrics) { if (datalakeMetrics == null) { return null; } return new BlobMetrics() .setEnabled(datalakeMetrics.isEnabled()) .setIncludeApis(datalakeMetrics.isIncludeApis()) .setVersion(datalakeMetrics.getVersion()) .setRetentionPolicy(toBlobRetentionPolicy(datalakeMetrics.getRetentionPolicy())); } static BlobRetentionPolicy toBlobRetentionPolicy(DataLakeRetentionPolicy datalakePolicy) { if (datalakePolicy == null) { return null; } return new BlobRetentionPolicy() .setDays(datalakePolicy.getDays()) .setEnabled(datalakePolicy.isEnabled()); } static PathDeletedItem toPathDeletedItem(BlobItemInternal blobItem) { if (blobItem == null) { return null; } return new PathDeletedItem(blobItem.getName(), false, blobItem.getDeletionId(), blobItem.getProperties().getDeletedTime(), blobItem.getProperties().getRemainingRetentionDays()); } static PathDeletedItem toPathDeletedItem(BlobPrefix blobPrefix) { return new PathDeletedItem(blobPrefix.getName(), true, null, null, null); } static CustomerProvidedKey toBlobCustomerProvidedKey( com.azure.storage.file.datalake.models.CustomerProvidedKey key) { if (key == null) { return null; } return new CustomerProvidedKey(key.getKey()); } static CpkInfo fromBlobCpkInfo(com.azure.storage.blob.models.CpkInfo
info) { if (info == null) { return null; } return new CpkInfo() .setEncryptionKey(info.getEncryptionKey()) .setEncryptionAlgorithm(com.azure.storage.file.datalake.models.EncryptionAlgorithmType.fromString( info.getEncryptionAlgorithm().toString())) .setEncryptionKeySha256(info.getEncryptionKeySha256()); } static BlobContainerEncryptionScope toBlobContainerEncryptionScope(FileSystemEncryptionScopeOptions fileSystemEncryptionScope) { if (fileSystemEncryptionScope == null) { return null; } return new BlobContainerEncryptionScope() .setDefaultEncryptionScope(fileSystemEncryptionScope.getDefaultEncryptionScope()) .setEncryptionScopeOverridePrevented(fileSystemEncryptionScope.isEncryptionScopeOverridePrevented()); } }
Now that there are separate builders for the two clients, the `buildClient()` call can be chained directly onto the fluent builder methods instead of first storing each builder in a local variable:

```java
LoadTestAdministrationClient adminClient = new LoadTestAdministrationClientBuilder()
    .credential(new DefaultAzureCredentialBuilder().build())
    .endpoint("<endpoint>")
    .buildClient();

LoadTestRunClient testRunClient = new LoadTestRunClientBuilder()
    .credential(new DefaultAzureCredentialBuilder().build())
    .endpoint("<endpoint>")
    .buildClient();
```
/**
 * End-to-end Azure Load Testing sample: creates a test, uploads a JMX script, starts a
 * test run, stops it, then polls the run to completion and lists its client metrics.
 *
 * NOTE(review): the "https:" secret-value literal below appears truncated (the rest of the
 * Key Vault URL and the closing quote were stripped, presumably by URL sanitization) — this
 * will not compile as-is; restore the full secret URI string.
 *
 * @param args Unused. Arguments to the program.
 */
public static void main(String[] args) {
    // Separate builders for the administration and test-run clients, both using
    // DefaultAzureCredential against the same endpoint.
    LoadTestAdministrationClientBuilder adminBuilder = new LoadTestAdministrationClientBuilder()
        .credential(new DefaultAzureCredentialBuilder().build())
        .endpoint("<endpoint>");
    LoadTestRunClientBuilder testRunBuilder = new LoadTestRunClientBuilder()
        .credential(new DefaultAzureCredentialBuilder().build())
        .endpoint("<endpoint>");
    LoadTestAdministrationClient adminClient = adminBuilder.buildClient();
    LoadTestRunClient testRunClient = testRunBuilder.buildClient();

    // Fixed sample identifiers and local script path.
    final String testId = "6758667a-a57c-47e5-9cef-9b1f1432daca";
    final String testRunId = "f758667a-c5ac-269a-dce1-5c1f14f2d142";
    final String testFileName = "test-script.jmx";
    final String testFilePath = "C:/path/to/file/sample-script.jmx";

    /* BEGIN: Create test */
    // Test definition assembled as a raw map and sent as BinaryData JSON.
    Map<String, Object> testMap = new HashMap<String, Object>();
    testMap.put("displayName", "Sample Display Name");
    testMap.put("description", "Sample Description");
    Map<String, Object> loadTestConfigMap = new HashMap<String, Object>();
    loadTestConfigMap.put("engineInstances", 1);
    testMap.put("loadTestConfiguration", loadTestConfigMap);
    Map<String, Object> envVarMap = new HashMap<String, Object>();
    envVarMap.put("a", "b");
    envVarMap.put("x", "y");
    testMap.put("environmentVariables", envVarMap);
    Map<String, Object> secretMap = new HashMap<String, Object>();
    Map<String, Object> sampleSecretMap = new HashMap<String, Object>();
    // NOTE(review): truncated string literal — see method Javadoc.
    sampleSecretMap.put("value", "https: sampleSecretMap.put("type", "AKV_SECRET_URI");
    secretMap.put("sampleSecret", sampleSecretMap);
    testMap.put("secrets", secretMap);
    // Pass/fail criterion: continue when percentage of response_time_ms exceeds 20.
    Map<String, Object> passFailMap = new HashMap<String, Object>();
    Map<String, Object> passFailMetrics = new HashMap<String, Object>();
    Map<String, Object> samplePassFailMetric = new HashMap<String, Object>();
    samplePassFailMetric.put("clientmetric", "response_time_ms");
    samplePassFailMetric.put("aggregate", "percentage");
    samplePassFailMetric.put("condition", ">");
    samplePassFailMetric.put("value", "20");
    samplePassFailMetric.put("action", "continue");
    passFailMetrics.put("fefd759d-7fe8-4f83-8b6d-aeebe0f491fe", samplePassFailMetric);
    passFailMap.put("passFailMetrics", passFailMetrics);
    testMap.put("passFailCriteria", passFailMap);
    BinaryData test = BinaryData.fromObject(testMap);
    Response<BinaryData> testOutResponse = adminClient.createOrUpdateTestWithResponse(testId, test, null);
    System.out.println(testOutResponse.getValue().toString());
    /* END: Create test */

    /* BEGIN: Upload test file */
    BinaryData fileData = BinaryData.fromFile(new File(testFilePath).toPath());
    // Long-running upload; blocks up to 2 minutes for validation to finish.
    PollResponse<BinaryData> fileUrlOut = adminClient.beginUploadTestFile(testId, testFileName, fileData, null)
        .waitForCompletion(Duration.ofMinutes(2));
    System.out.println(fileUrlOut.getValue().toString());
    /* END: Upload test file */

    /* BEGIN: Start test run */
    Map<String, Object> testRunMap = new HashMap<String, Object>();
    testRunMap.put("testId", testId);
    testRunMap.put("displayName", "SDK-Created-TestRun");
    BinaryData testRun = BinaryData.fromObject(testRunMap);
    SyncPoller<BinaryData, BinaryData> testRunPoller = testRunClient.beginTestRun(testRunId, testRun, null);
    System.out.println(testRunPoller.poll().getValue().toString());
    /* END: Start test run */

    /* BEGIN: Stop test run */
    // Let the run execute for ~10s before stopping it.
    // NOTE(review): InterruptedException is swallowed without re-interrupting the thread.
    try {
        Thread.sleep(10 * 1000);
    } catch (InterruptedException e) {
    }
    Response<BinaryData> stoppedTestRunOut = testRunClient.stopTestRunWithResponse(testRunId, null);
    System.out.println(stoppedTestRunOut.getValue().toString());
    /* END: Stop test run */

    /* BEGIN: List metrics */
    // Poll until the run reaches a terminal state, printing the status each cycle.
    PollResponse<BinaryData> testRunOut = testRunPoller.poll();
    JsonNode testRunJson = null;
    String testStatus = null, startDateTime = null, endDateTime = null;
    while (!testRunOut.getStatus().isComplete()) {
        testRunOut = testRunPoller.poll();
        try {
            testRunJson = new ObjectMapper().readTree(testRunOut.getValue().toString());
            testStatus = testRunJson.get("status").asText();
            System.out.println("Status of test run: " + testStatus);
        } catch (JsonProcessingException e) {
            e.printStackTrace();
        }
        // 5s between polls; interruption is swallowed (see note above).
        try {
            Thread.sleep(5 * 1000);
        } catch (InterruptedException e) {
        }
    }
    // Extract the run's time window for the metrics query.
    // NOTE(review): if readTree throws, startDateTime/endDateTime stay null and the
    // listMetrics timespan below becomes "null/null" — confirm that is acceptable here.
    try {
        testRunJson = new ObjectMapper().readTree(testRunPoller.getFinalResult().toString());
        startDateTime = testRunJson.get("startDateTime").asText();
        endDateTime = testRunJson.get("endDateTime").asText();
    } catch (JsonProcessingException e) {
        e.printStackTrace();
    }
    // Discover the first metric namespace, then the first metric definition within it.
    Response<BinaryData> metricNamespacesOut = testRunClient.getMetricNamespacesWithResponse(testRunId, null);
    String metricNamespace = null;
    try {
        JsonNode metricNamespacesJson = new ObjectMapper().readTree(metricNamespacesOut.getValue().toString());
        metricNamespace = metricNamespacesJson.get("value").get(0).get("metricNamespaceName").asText();
    } catch (JsonProcessingException e) {
        e.printStackTrace();
    }
    Response<BinaryData> metricDefinitionsOut = testRunClient.getMetricDefinitionsWithResponse(testRunId, metricNamespace, null);
    String metricName = null;
    try {
        JsonNode metricDefinitionsJson = new ObjectMapper().readTree(metricDefinitionsOut.getValue().toString());
        metricName = metricDefinitionsJson.get("value").get(0).get("name").get("value").asText();
    } catch (JsonProcessingException e) {
        e.printStackTrace();
    }
    // Fetch and print every metric point over the run's start/end window.
    PagedIterable<BinaryData> clientMetricsOut = testRunClient.listMetrics(testRunId, metricName, metricNamespace,
        startDateTime + '/' + endDateTime, null);
    clientMetricsOut.forEach((clientMetric) -> {
        System.out.println(clientMetric.toString());
    });
    /* END: List metrics */
}
LoadTestRunClient testRunClient = testRunBuilder.buildClient();
/**
 * End-to-end Azure Load Testing sample (builder-inlined variant): creates a test, uploads a
 * JMX script, starts a test run, stops it, then polls to completion and lists client metrics.
 *
 * NOTE(review): the "https:" secret-value literal below appears truncated (the rest of the
 * Key Vault URL and the closing quote were stripped, presumably by URL sanitization) — this
 * will not compile as-is; restore the full secret URI string.
 *
 * @param args Unused. Arguments to the program.
 */
public static void main(String[] args) {
    // Clients built inline — buildClient() chained directly onto the fluent builders.
    LoadTestAdministrationClient adminClient = new LoadTestAdministrationClientBuilder()
        .credential(new DefaultAzureCredentialBuilder().build())
        .endpoint("<endpoint>")
        .buildClient();
    LoadTestRunClient testRunClient = new LoadTestRunClientBuilder()
        .credential(new DefaultAzureCredentialBuilder().build())
        .endpoint("<endpoint>")
        .buildClient();

    // Fixed sample identifiers and local script path.
    final String testId = "6758667a-a57c-47e5-9cef-9b1f1432daca";
    final String testRunId = "f758667a-c5ac-269a-dce1-5c1f14f2d142";
    final String testFileName = "test-script.jmx";
    final String testFilePath = "C:/path/to/file/sample-script.jmx";

    /* BEGIN: Create test */
    // Test definition assembled as a raw map and sent as BinaryData JSON.
    Map<String, Object> testMap = new HashMap<String, Object>();
    testMap.put("displayName", "Sample Display Name");
    testMap.put("description", "Sample Description");
    Map<String, Object> loadTestConfigMap = new HashMap<String, Object>();
    loadTestConfigMap.put("engineInstances", 1);
    testMap.put("loadTestConfiguration", loadTestConfigMap);
    Map<String, Object> envVarMap = new HashMap<String, Object>();
    envVarMap.put("a", "b");
    envVarMap.put("x", "y");
    testMap.put("environmentVariables", envVarMap);
    Map<String, Object> secretMap = new HashMap<String, Object>();
    Map<String, Object> sampleSecretMap = new HashMap<String, Object>();
    // NOTE(review): truncated string literal — see method Javadoc.
    sampleSecretMap.put("value", "https: sampleSecretMap.put("type", "AKV_SECRET_URI");
    secretMap.put("sampleSecret", sampleSecretMap);
    testMap.put("secrets", secretMap);
    // Pass/fail criterion: continue when percentage of response_time_ms exceeds 20.
    Map<String, Object> passFailMap = new HashMap<String, Object>();
    Map<String, Object> passFailMetrics = new HashMap<String, Object>();
    Map<String, Object> samplePassFailMetric = new HashMap<String, Object>();
    samplePassFailMetric.put("clientmetric", "response_time_ms");
    samplePassFailMetric.put("aggregate", "percentage");
    samplePassFailMetric.put("condition", ">");
    samplePassFailMetric.put("value", "20");
    samplePassFailMetric.put("action", "continue");
    passFailMetrics.put("fefd759d-7fe8-4f83-8b6d-aeebe0f491fe", samplePassFailMetric);
    passFailMap.put("passFailMetrics", passFailMetrics);
    testMap.put("passFailCriteria", passFailMap);
    BinaryData test = BinaryData.fromObject(testMap);
    Response<BinaryData> testOutResponse = adminClient.createOrUpdateTestWithResponse(testId, test, null);
    System.out.println(testOutResponse.getValue().toString());
    /* END: Create test */

    /* BEGIN: Upload test file */
    BinaryData fileData = BinaryData.fromFile(new File(testFilePath).toPath());
    // Long-running upload; blocks up to 2 minutes for validation to finish.
    PollResponse<BinaryData> fileUrlOut = adminClient.beginUploadTestFile(testId, testFileName, fileData, null)
        .waitForCompletion(Duration.ofMinutes(2));
    System.out.println(fileUrlOut.getValue().toString());
    /* END: Upload test file */

    /* BEGIN: Start test run */
    Map<String, Object> testRunMap = new HashMap<String, Object>();
    testRunMap.put("testId", testId);
    testRunMap.put("displayName", "SDK-Created-TestRun");
    BinaryData testRun = BinaryData.fromObject(testRunMap);
    SyncPoller<BinaryData, BinaryData> testRunPoller = testRunClient.beginTestRun(testRunId, testRun, null);
    System.out.println(testRunPoller.poll().getValue().toString());
    /* END: Start test run */

    /* BEGIN: Stop test run */
    // Let the run execute for ~10s before stopping it.
    // NOTE(review): InterruptedException is swallowed without re-interrupting the thread.
    try {
        Thread.sleep(10 * 1000);
    } catch (InterruptedException e) {
    }
    Response<BinaryData> stoppedTestRunOut = testRunClient.stopTestRunWithResponse(testRunId, null);
    System.out.println(stoppedTestRunOut.getValue().toString());
    /* END: Stop test run */

    /* BEGIN: List metrics */
    // Poll until the run reaches a terminal state, printing the status each cycle.
    PollResponse<BinaryData> testRunOut = testRunPoller.poll();
    JsonNode testRunJson = null;
    String testStatus = null, startDateTime = null, endDateTime = null;
    while (!testRunOut.getStatus().isComplete()) {
        testRunOut = testRunPoller.poll();
        try {
            testRunJson = new ObjectMapper().readTree(testRunOut.getValue().toString());
            testStatus = testRunJson.get("status").asText();
            System.out.println("Status of test run: " + testStatus);
        } catch (JsonProcessingException e) {
            e.printStackTrace();
        }
        // 5s between polls; interruption is swallowed (see note above).
        try {
            Thread.sleep(5 * 1000);
        } catch (InterruptedException e) {
        }
    }
    // Extract the run's time window for the metrics query.
    // NOTE(review): if readTree throws, startDateTime/endDateTime stay null and the
    // listMetrics timespan below becomes "null/null" — confirm that is acceptable here.
    try {
        testRunJson = new ObjectMapper().readTree(testRunPoller.getFinalResult().toString());
        startDateTime = testRunJson.get("startDateTime").asText();
        endDateTime = testRunJson.get("endDateTime").asText();
    } catch (JsonProcessingException e) {
        e.printStackTrace();
    }
    // Discover the first metric namespace, then the first metric definition within it.
    Response<BinaryData> metricNamespacesOut = testRunClient.getMetricNamespacesWithResponse(testRunId, null);
    String metricNamespace = null;
    try {
        JsonNode metricNamespacesJson = new ObjectMapper().readTree(metricNamespacesOut.getValue().toString());
        metricNamespace = metricNamespacesJson.get("value").get(0).get("metricNamespaceName").asText();
    } catch (JsonProcessingException e) {
        e.printStackTrace();
    }
    Response<BinaryData> metricDefinitionsOut = testRunClient.getMetricDefinitionsWithResponse(testRunId, metricNamespace, null);
    String metricName = null;
    try {
        JsonNode metricDefinitionsJson = new ObjectMapper().readTree(metricDefinitionsOut.getValue().toString());
        metricName = metricDefinitionsJson.get("value").get(0).get("name").get("value").asText();
    } catch (JsonProcessingException e) {
        e.printStackTrace();
    }
    // Fetch and print every metric point over the run's start/end window.
    PagedIterable<BinaryData> clientMetricsOut = testRunClient.listMetrics(testRunId, metricName, metricNamespace,
        startDateTime + '/' + endDateTime, null);
    clientMetricsOut.forEach((clientMetric) -> {
        System.out.println(clientMetric.toString());
    });
    /* END: List metrics */
}
/**
 * Sample demonstrating authentication with an Azure Load Testing resource and listing
 * tests, test files and test runs for that resource.
 */
class HelloWorld {
    /**
     * Authenticates with the load testing resource and shows how to list tests, test files and test runs
     * for a given resource.
     *
     * <p>NOTE(review): this Javadoc documents a {@code main(String[] args)} entry point that is not
     * visible in this snippet — confirm the method declaration follows directly in the full file.</p>
     *
     * @param args Unused. Arguments to the program.
     *
     * @throws ClientAuthenticationException - when the credentials have insufficient permissions for load test resource.
     * @throws ResourceNotFoundException - when test with `testId` does not exist when listing files.
     */
}
/**
 * Sample demonstrating authentication with an Azure Load Testing resource and listing
 * tests, test files and test runs for that resource.
 */
class HelloWorld {
    /**
     * Authenticates with the load testing resource and shows how to list tests, test files and test runs
     * for a given resource.
     *
     * <p>NOTE(review): this Javadoc documents a {@code main(String[] args)} entry point that is not
     * visible in this snippet — confirm the method declaration follows directly in the full file.</p>
     *
     * @param args Unused. Arguments to the program.
     *
     * @throws ClientAuthenticationException - when the credentials have insufficient permissions for load test resource.
     * @throws ResourceNotFoundException - when test with `testId` does not exist when listing files.
     */
}
Same as above — there is no need for a separate builder variable; simply chain `buildClient()` after `endpoint()`.
/**
 * Authenticates against the Load Testing data plane with DefaultAzureCredential and
 * issues paged list requests for tests and test runs.
 */
public void auth() {
    // A single credential instance is shared by both client builders.
    TokenCredential tokenCredential = new DefaultAzureCredentialBuilder()
        .build();

    LoadTestAdministrationClientBuilder administrationBuilder = new LoadTestAdministrationClientBuilder()
        .credential(tokenCredential)
        .endpoint("<Enter Azure Load Testing Data-Plane URL>");
    LoadTestRunClientBuilder runBuilder = new LoadTestRunClientBuilder()
        .credential(tokenCredential)
        .endpoint("<Enter Azure Load Testing Data-Plane URL>");

    LoadTestAdministrationClient administrationClient = administrationBuilder.buildClient();
    LoadTestRunClient runClient = runBuilder.buildClient();

    // List tests ordered by lastModifiedDateTime, 10 items per page.
    RequestOptions listTestsOptions = new RequestOptions()
        .addQueryParam("orderBy", "lastModifiedDateTime")
        .addQueryParam("maxPageSize", "10");
    administrationClient.listTests(listTestsOptions);

    // List executing/done test runs with the same ordering and page size.
    RequestOptions listTestRunsOptions = new RequestOptions()
        .addQueryParam("orderBy", "lastModifiedDateTime")
        .addQueryParam("status", "EXECUTING,DONE")
        .addQueryParam("maxPageSize", "10");
    runClient.listTestRuns(listTestRunsOptions);
}
LoadTestRunClient testRunClient = testRunBuilder.buildClient();
/**
 * Authenticates against the Load Testing data plane with DefaultAzureCredential and
 * issues paged list requests for tests and test runs, building each client inline.
 */
public void auth() {
    // A single credential instance is shared by both clients.
    TokenCredential tokenCredential = new DefaultAzureCredentialBuilder()
        .build();

    // buildClient() chained directly onto the fluent builders — no builder variables.
    LoadTestAdministrationClient administrationClient = new LoadTestAdministrationClientBuilder()
        .credential(tokenCredential)
        .endpoint("<Enter Azure Load Testing Data-Plane URL>")
        .buildClient();
    LoadTestRunClient runClient = new LoadTestRunClientBuilder()
        .credential(tokenCredential)
        .endpoint("<Enter Azure Load Testing Data-Plane URL>")
        .buildClient();

    // List tests ordered by lastModifiedDateTime, 10 items per page.
    RequestOptions listTestsOptions = new RequestOptions()
        .addQueryParam("orderBy", "lastModifiedDateTime")
        .addQueryParam("maxPageSize", "10");
    administrationClient.listTests(listTestsOptions);

    // List executing/done test runs with the same ordering and page size.
    RequestOptions listTestRunsOptions = new RequestOptions()
        .addQueryParam("orderBy", "lastModifiedDateTime")
        .addQueryParam("status", "EXECUTING,DONE")
        .addQueryParam("maxPageSize", "10");
    runClient.listTestRuns(listTestRunsOptions);
}
/**
 * README samples for the Azure Load Testing client library: creating a test, uploading a
 * test file, and running a test while polling status and listing client metrics.
 *
 * NOTE(review): in createTest the "https:" secret-value literal appears truncated (the rest
 * of the Key Vault URL and the closing quote were stripped, presumably by URL sanitization) —
 * it will not compile as-is; restore the full secret URI string.
 */
class ReadmeSamples {
    /** Creates (or updates) a load test definition built from a raw map payload. */
    public void createTest() {
        LoadTestAdministrationClient adminClient = new LoadTestAdministrationClientBuilder()
            .credential(new DefaultAzureCredentialBuilder().build())
            .endpoint("<endpoint>")
            .buildClient();
        Map<String, Object> testMap = new HashMap<String, Object>();
        testMap.put("displayName", "Sample Display Name");
        testMap.put("description", "Sample Description");
        Map<String, Object> loadTestConfigMap = new HashMap<String, Object>();
        loadTestConfigMap.put("engineInstances", 1);
        testMap.put("loadTestConfiguration", loadTestConfigMap);
        Map<String, Object> envVarMap = new HashMap<String, Object>();
        envVarMap.put("a", "b");
        envVarMap.put("x", "y");
        testMap.put("environmentVariables", envVarMap);
        Map<String, Object> secretMap = new HashMap<String, Object>();
        Map<String, Object> sampleSecretMap = new HashMap<String, Object>();
        // NOTE(review): truncated string literal — see class Javadoc.
        sampleSecretMap.put("value", "https: sampleSecretMap.put("type", "AKV_SECRET_URI");
        secretMap.put("sampleSecret", sampleSecretMap);
        testMap.put("secrets", secretMap);
        // Pass/fail criterion: continue when percentage of response_time_ms exceeds 20.
        Map<String, Object> passFailMap = new HashMap<String, Object>();
        Map<String, Object> passFailMetrics = new HashMap<String, Object>();
        Map<String, Object> samplePassFailMetric = new HashMap<String, Object>();
        samplePassFailMetric.put("clientmetric", "response_time_ms");
        samplePassFailMetric.put("aggregate", "percentage");
        samplePassFailMetric.put("condition", ">");
        samplePassFailMetric.put("value", "20");
        samplePassFailMetric.put("action", "continue");
        passFailMetrics.put("fefd759d-7fe8-4f83-8b6d-aeebe0f491fe", samplePassFailMetric);
        passFailMap.put("passFailMetrics", passFailMetrics);
        testMap.put("passFailCriteria", passFailMap);
        BinaryData test = BinaryData.fromObject(testMap);
        Response<BinaryData> testOutResponse = adminClient.createOrUpdateTestWithResponse("test12345", test, null);
        System.out.println(testOutResponse.getValue().toString());
    }

    /** Uploads a local JMX file to an existing test via the non-polling overload. */
    public void uploadTestFile() throws IOException {
        LoadTestAdministrationClient adminClient = new LoadTestAdministrationClientBuilder()
            .credential(new DefaultAzureCredentialBuilder().build())
            .endpoint("<endpoint>")
            .buildClient();
        BinaryData fileData = BinaryData.fromFile(new File("path/to/file").toPath());
        Response<BinaryData> fileUrlOut = adminClient.uploadTestFileWithResponse("test12345", "sample-file.jmx", fileData, null);
        System.out.println(fileUrlOut.getValue().toString());
    }

    /**
     * Starts a test run, polls it to completion at a 5s interval, then discovers the first
     * metric namespace/definition and lists metrics over the run's time window.
     */
    public void runTest() {
        LoadTestRunClient testRunClient = new LoadTestRunClientBuilder()
            .credential(new DefaultAzureCredentialBuilder().build())
            .endpoint("<endpoint>")
            .buildClient();
        Map<String, Object> testRunMap = new HashMap<String, Object>();
        testRunMap.put("testId", "test12345");
        testRunMap.put("displayName", "SDK-Created-TestRun");
        BinaryData testRun = BinaryData.fromObject(testRunMap);
        SyncPoller<BinaryData, BinaryData> poller = testRunClient.beginTestRun("testrun12345", testRun, null);
        Duration pollInterval = Duration.ofSeconds(5);
        poller = poller.setPollInterval(pollInterval);
        JsonNode testRunJson = null;
        String testStatus;
        PollResponse<BinaryData> pollResponse = poller.poll();
        // Keep polling while the long-running operation has not reached a terminal state.
        while (pollResponse.getStatus() == LongRunningOperationStatus.IN_PROGRESS
            || pollResponse.getStatus() == LongRunningOperationStatus.NOT_STARTED) {
            try {
                testRunJson = new ObjectMapper().readTree(pollResponse.getValue().toString());
                testStatus = testRunJson.get("status").asText();
                System.out.println("Test run status: " + testStatus);
            } catch (JsonProcessingException e) {
                System.out.println("Error processing JSON response");
            }
            // NOTE(review): InterruptedException is swallowed without re-interrupting the thread.
            try {
                Thread.sleep(pollInterval.toMillis());
            } catch (InterruptedException e) {
            }
            pollResponse = poller.poll();
        }
        poller.waitForCompletion();
        BinaryData testRunBinary = poller.getFinalResult();
        // NOTE(review): if readTree throws here, testRunJson may remain null and the
        // get("startDateTime") dereference below would NPE — confirm this is acceptable
        // for sample code.
        try {
            testRunJson = new ObjectMapper().readTree(testRunBinary.toString());
            testStatus = testRunJson.get("status").asText();
        } catch (JsonProcessingException e) {
            System.out.println("Error processing JSON response");
        }
        String startDateTime = testRunJson.get("startDateTime").asText();
        String endDateTime = testRunJson.get("endDateTime").asText();
        // Discover the first metric namespace, then the first metric definition within it.
        Response<BinaryData> metricNamespacesOut = testRunClient.getMetricNamespacesWithResponse("testrun12345", null);
        String metricNamespace = null;
        try {
            JsonNode metricNamespacesJson = new ObjectMapper().readTree(metricNamespacesOut.getValue().toString());
            metricNamespace = metricNamespacesJson.get("value").get(0).get("metricNamespaceName").asText();
        } catch (JsonProcessingException e) {
            System.out.println("Error processing JSON response");
        }
        Response<BinaryData> metricDefinitionsOut = testRunClient.getMetricDefinitionsWithResponse("testrun12345", metricNamespace, null);
        String metricName = null;
        try {
            JsonNode metricDefinitionsJson = new ObjectMapper().readTree(metricDefinitionsOut.getValue().toString());
            metricName = metricDefinitionsJson.get("value").get(0).get("name").get("value").asText();
        } catch (JsonProcessingException e) {
            System.out.println("Error processing JSON response");
        }
        // Fetch and print every metric point over the run's start/end window.
        PagedIterable<BinaryData> clientMetricsOut = testRunClient.listMetrics("testrun12345", metricName, metricNamespace,
            startDateTime + '/' + endDateTime, null);
        clientMetricsOut.forEach((clientMetric) -> {
            System.out.println(clientMetric.toString());
        });
    }
}
/**
 * README samples for the Azure Load Testing client library: creating a test, uploading a
 * test file, and running a test while polling status and listing client metrics.
 *
 * NOTE(review): in createTest the "https:" secret-value literal appears truncated (the rest
 * of the Key Vault URL and the closing quote were stripped, presumably by URL sanitization) —
 * it will not compile as-is; restore the full secret URI string.
 */
class ReadmeSamples {
    /** Creates (or updates) a load test definition built from a raw map payload. */
    public void createTest() {
        LoadTestAdministrationClient adminClient = new LoadTestAdministrationClientBuilder()
            .credential(new DefaultAzureCredentialBuilder().build())
            .endpoint("<endpoint>")
            .buildClient();
        Map<String, Object> testMap = new HashMap<String, Object>();
        testMap.put("displayName", "Sample Display Name");
        testMap.put("description", "Sample Description");
        Map<String, Object> loadTestConfigMap = new HashMap<String, Object>();
        loadTestConfigMap.put("engineInstances", 1);
        testMap.put("loadTestConfiguration", loadTestConfigMap);
        Map<String, Object> envVarMap = new HashMap<String, Object>();
        envVarMap.put("a", "b");
        envVarMap.put("x", "y");
        testMap.put("environmentVariables", envVarMap);
        Map<String, Object> secretMap = new HashMap<String, Object>();
        Map<String, Object> sampleSecretMap = new HashMap<String, Object>();
        // NOTE(review): truncated string literal — see class Javadoc.
        sampleSecretMap.put("value", "https: sampleSecretMap.put("type", "AKV_SECRET_URI");
        secretMap.put("sampleSecret", sampleSecretMap);
        testMap.put("secrets", secretMap);
        // Pass/fail criterion: continue when percentage of response_time_ms exceeds 20.
        Map<String, Object> passFailMap = new HashMap<String, Object>();
        Map<String, Object> passFailMetrics = new HashMap<String, Object>();
        Map<String, Object> samplePassFailMetric = new HashMap<String, Object>();
        samplePassFailMetric.put("clientmetric", "response_time_ms");
        samplePassFailMetric.put("aggregate", "percentage");
        samplePassFailMetric.put("condition", ">");
        samplePassFailMetric.put("value", "20");
        samplePassFailMetric.put("action", "continue");
        passFailMetrics.put("fefd759d-7fe8-4f83-8b6d-aeebe0f491fe", samplePassFailMetric);
        passFailMap.put("passFailMetrics", passFailMetrics);
        testMap.put("passFailCriteria", passFailMap);
        BinaryData test = BinaryData.fromObject(testMap);
        Response<BinaryData> testOutResponse = adminClient.createOrUpdateTestWithResponse("test12345", test, null);
        System.out.println(testOutResponse.getValue().toString());
    }

    /** Uploads a local JMX file to an existing test via the non-polling overload. */
    public void uploadTestFile() throws IOException {
        LoadTestAdministrationClient adminClient = new LoadTestAdministrationClientBuilder()
            .credential(new DefaultAzureCredentialBuilder().build())
            .endpoint("<endpoint>")
            .buildClient();
        BinaryData fileData = BinaryData.fromFile(new File("path/to/file").toPath());
        Response<BinaryData> fileUrlOut = adminClient.uploadTestFileWithResponse("test12345", "sample-file.jmx", fileData, null);
        System.out.println(fileUrlOut.getValue().toString());
    }

    /**
     * Starts a test run, polls it to completion at a 5s interval, then discovers the first
     * metric namespace/definition and lists metrics over the run's time window.
     */
    public void runTest() {
        LoadTestRunClient testRunClient = new LoadTestRunClientBuilder()
            .credential(new DefaultAzureCredentialBuilder().build())
            .endpoint("<endpoint>")
            .buildClient();
        Map<String, Object> testRunMap = new HashMap<String, Object>();
        testRunMap.put("testId", "test12345");
        testRunMap.put("displayName", "SDK-Created-TestRun");
        BinaryData testRun = BinaryData.fromObject(testRunMap);
        SyncPoller<BinaryData, BinaryData> poller = testRunClient.beginTestRun("testrun12345", testRun, null);
        Duration pollInterval = Duration.ofSeconds(5);
        poller = poller.setPollInterval(pollInterval);
        JsonNode testRunJson = null;
        String testStatus;
        PollResponse<BinaryData> pollResponse = poller.poll();
        // Keep polling while the long-running operation has not reached a terminal state.
        while (pollResponse.getStatus() == LongRunningOperationStatus.IN_PROGRESS
            || pollResponse.getStatus() == LongRunningOperationStatus.NOT_STARTED) {
            try {
                testRunJson = new ObjectMapper().readTree(pollResponse.getValue().toString());
                testStatus = testRunJson.get("status").asText();
                System.out.println("Test run status: " + testStatus);
            } catch (JsonProcessingException e) {
                System.out.println("Error processing JSON response");
            }
            // NOTE(review): InterruptedException is swallowed without re-interrupting the thread.
            try {
                Thread.sleep(pollInterval.toMillis());
            } catch (InterruptedException e) {
            }
            pollResponse = poller.poll();
        }
        poller.waitForCompletion();
        BinaryData testRunBinary = poller.getFinalResult();
        // NOTE(review): if readTree throws here, testRunJson may remain null and the
        // get("startDateTime") dereference below would NPE — confirm this is acceptable
        // for sample code.
        try {
            testRunJson = new ObjectMapper().readTree(testRunBinary.toString());
            testStatus = testRunJson.get("status").asText();
        } catch (JsonProcessingException e) {
            System.out.println("Error processing JSON response");
        }
        String startDateTime = testRunJson.get("startDateTime").asText();
        String endDateTime = testRunJson.get("endDateTime").asText();
        // Discover the first metric namespace, then the first metric definition within it.
        Response<BinaryData> metricNamespacesOut = testRunClient.getMetricNamespacesWithResponse("testrun12345", null);
        String metricNamespace = null;
        try {
            JsonNode metricNamespacesJson = new ObjectMapper().readTree(metricNamespacesOut.getValue().toString());
            metricNamespace = metricNamespacesJson.get("value").get(0).get("metricNamespaceName").asText();
        } catch (JsonProcessingException e) {
            System.out.println("Error processing JSON response");
        }
        Response<BinaryData> metricDefinitionsOut = testRunClient.getMetricDefinitionsWithResponse("testrun12345", metricNamespace, null);
        String metricName = null;
        try {
            JsonNode metricDefinitionsJson = new ObjectMapper().readTree(metricDefinitionsOut.getValue().toString());
            metricName = metricDefinitionsJson.get("value").get(0).get("name").get("value").asText();
        } catch (JsonProcessingException e) {
            System.out.println("Error processing JSON response");
        }
        // Fetch and print every metric point over the run's start/end window.
        PagedIterable<BinaryData> clientMetricsOut = testRunClient.listMetrics("testrun12345", metricName, metricNamespace,
            startDateTime + '/' + endDateTime, null);
        clientMetricsOut.forEach((clientMetric) -> {
            System.out.println(clientMetric.toString());
        });
    }
}
Done
/**
 * Builds both Load Testing clients from shared-credential builders, then lists tests
 * and test runs with paged query options.
 */
public void auth() {
    // Shared credential for both builders.
    TokenCredential defaultCredential = new DefaultAzureCredentialBuilder()
        .build();

    LoadTestAdministrationClientBuilder adminClientBuilder = new LoadTestAdministrationClientBuilder()
        .credential(defaultCredential)
        .endpoint("<Enter Azure Load Testing Data-Plane URL>");
    LoadTestRunClientBuilder testRunClientBuilder = new LoadTestRunClientBuilder()
        .credential(defaultCredential)
        .endpoint("<Enter Azure Load Testing Data-Plane URL>");

    LoadTestAdministrationClient administrationClient = adminClientBuilder.buildClient();
    LoadTestRunClient testRunClient = testRunClientBuilder.buildClient();

    // Tests: ordered by lastModifiedDateTime, pages of 10.
    RequestOptions testListOptions = new RequestOptions()
        .addQueryParam("orderBy", "lastModifiedDateTime")
        .addQueryParam("maxPageSize", "10");
    administrationClient.listTests(testListOptions);

    // Test runs: same ordering/page size, filtered to executing/done runs.
    RequestOptions testRunListOptions = new RequestOptions()
        .addQueryParam("orderBy", "lastModifiedDateTime")
        .addQueryParam("status", "EXECUTING,DONE")
        .addQueryParam("maxPageSize", "10");
    testRunClient.listTestRuns(testRunListOptions);
}
LoadTestRunClient testRunClient = testRunBuilder.buildClient();
/**
 * Builds both Load Testing clients inline (no builder variables), then lists tests
 * and test runs with paged query options.
 */
public void auth() {
    // Shared credential for both clients.
    TokenCredential defaultCredential = new DefaultAzureCredentialBuilder()
        .build();

    // buildClient() chained straight after endpoint() on each fluent builder.
    LoadTestAdministrationClient administrationClient = new LoadTestAdministrationClientBuilder()
        .credential(defaultCredential)
        .endpoint("<Enter Azure Load Testing Data-Plane URL>")
        .buildClient();
    LoadTestRunClient testRunClient = new LoadTestRunClientBuilder()
        .credential(defaultCredential)
        .endpoint("<Enter Azure Load Testing Data-Plane URL>")
        .buildClient();

    // Tests: ordered by lastModifiedDateTime, pages of 10.
    RequestOptions testListOptions = new RequestOptions()
        .addQueryParam("orderBy", "lastModifiedDateTime")
        .addQueryParam("maxPageSize", "10");
    administrationClient.listTests(testListOptions);

    // Test runs: same ordering/page size, filtered to executing/done runs.
    RequestOptions testRunListOptions = new RequestOptions()
        .addQueryParam("orderBy", "lastModifiedDateTime")
        .addQueryParam("status", "EXECUTING,DONE")
        .addQueryParam("maxPageSize", "10");
    testRunClient.listTestRuns(testRunListOptions);
}
class ReadmeSamples { public void createTest() { LoadTestAdministrationClient adminClient = new LoadTestAdministrationClientBuilder() .credential(new DefaultAzureCredentialBuilder().build()) .endpoint("<endpoint>") .buildClient(); Map<String, Object> testMap = new HashMap<String, Object>(); testMap.put("displayName", "Sample Display Name"); testMap.put("description", "Sample Description"); Map<String, Object> loadTestConfigMap = new HashMap<String, Object>(); loadTestConfigMap.put("engineInstances", 1); testMap.put("loadTestConfiguration", loadTestConfigMap); Map<String, Object> envVarMap = new HashMap<String, Object>(); envVarMap.put("a", "b"); envVarMap.put("x", "y"); testMap.put("environmentVariables", envVarMap); Map<String, Object> secretMap = new HashMap<String, Object>(); Map<String, Object> sampleSecretMap = new HashMap<String, Object>(); sampleSecretMap.put("value", "https: sampleSecretMap.put("type", "AKV_SECRET_URI"); secretMap.put("sampleSecret", sampleSecretMap); testMap.put("secrets", secretMap); Map<String, Object> passFailMap = new HashMap<String, Object>(); Map<String, Object> passFailMetrics = new HashMap<String, Object>(); Map<String, Object> samplePassFailMetric = new HashMap<String, Object>(); samplePassFailMetric.put("clientmetric", "response_time_ms"); samplePassFailMetric.put("aggregate", "percentage"); samplePassFailMetric.put("condition", ">"); samplePassFailMetric.put("value", "20"); samplePassFailMetric.put("action", "continue"); passFailMetrics.put("fefd759d-7fe8-4f83-8b6d-aeebe0f491fe", samplePassFailMetric); passFailMap.put("passFailMetrics", passFailMetrics); testMap.put("passFailCriteria", passFailMap); BinaryData test = BinaryData.fromObject(testMap); Response<BinaryData> testOutResponse = adminClient.createOrUpdateTestWithResponse("test12345", test, null); System.out.println(testOutResponse.getValue().toString()); } public void uploadTestFile() throws IOException { LoadTestAdministrationClient adminClient = new 
LoadTestAdministrationClientBuilder() .credential(new DefaultAzureCredentialBuilder().build()) .endpoint("<endpoint>") .buildClient(); BinaryData fileData = BinaryData.fromFile(new File("path/to/file").toPath()); Response<BinaryData> fileUrlOut = adminClient.uploadTestFileWithResponse("test12345", "sample-file.jmx", fileData, null); System.out.println(fileUrlOut.getValue().toString()); } public void runTest() { LoadTestRunClient testRunClient = new LoadTestRunClientBuilder() .credential(new DefaultAzureCredentialBuilder().build()) .endpoint("<endpoint>") .buildClient(); Map<String, Object> testRunMap = new HashMap<String, Object>(); testRunMap.put("testId", "test12345"); testRunMap.put("displayName", "SDK-Created-TestRun"); BinaryData testRun = BinaryData.fromObject(testRunMap); SyncPoller<BinaryData, BinaryData> poller = testRunClient.beginTestRun("testrun12345", testRun, null); Duration pollInterval = Duration.ofSeconds(5); poller = poller.setPollInterval(pollInterval); JsonNode testRunJson = null; String testStatus; PollResponse<BinaryData> pollResponse = poller.poll(); while (pollResponse.getStatus() == LongRunningOperationStatus.IN_PROGRESS || pollResponse.getStatus() == LongRunningOperationStatus.NOT_STARTED) { try { testRunJson = new ObjectMapper().readTree(pollResponse.getValue().toString()); testStatus = testRunJson.get("status").asText(); System.out.println("Test run status: " + testStatus); } catch (JsonProcessingException e) { System.out.println("Error processing JSON response"); } try { Thread.sleep(pollInterval.toMillis()); } catch (InterruptedException e) { } pollResponse = poller.poll(); } poller.waitForCompletion(); BinaryData testRunBinary = poller.getFinalResult(); try { testRunJson = new ObjectMapper().readTree(testRunBinary.toString()); testStatus = testRunJson.get("status").asText(); } catch (JsonProcessingException e) { System.out.println("Error processing JSON response"); } String startDateTime = testRunJson.get("startDateTime").asText(); 
String endDateTime = testRunJson.get("endDateTime").asText(); Response<BinaryData> metricNamespacesOut = testRunClient.getMetricNamespacesWithResponse("testrun12345", null); String metricNamespace = null; try { JsonNode metricNamespacesJson = new ObjectMapper().readTree(metricNamespacesOut.getValue().toString()); metricNamespace = metricNamespacesJson.get("value").get(0).get("metricNamespaceName").asText(); } catch (JsonProcessingException e) { System.out.println("Error processing JSON response"); } Response<BinaryData> metricDefinitionsOut = testRunClient.getMetricDefinitionsWithResponse("testrun12345", metricNamespace, null); String metricName = null; try { JsonNode metricDefinitionsJson = new ObjectMapper().readTree(metricDefinitionsOut.getValue().toString()); metricName = metricDefinitionsJson.get("value").get(0).get("name").get("value").asText(); } catch (JsonProcessingException e) { System.out.println("Error processing JSON response"); } PagedIterable<BinaryData> clientMetricsOut = testRunClient.listMetrics("testrun12345", metricName, metricNamespace, startDateTime + '/' + endDateTime, null); clientMetricsOut.forEach((clientMetric) -> { System.out.println(clientMetric.toString()); }); } }
class ReadmeSamples { public void createTest() { LoadTestAdministrationClient adminClient = new LoadTestAdministrationClientBuilder() .credential(new DefaultAzureCredentialBuilder().build()) .endpoint("<endpoint>") .buildClient(); Map<String, Object> testMap = new HashMap<String, Object>(); testMap.put("displayName", "Sample Display Name"); testMap.put("description", "Sample Description"); Map<String, Object> loadTestConfigMap = new HashMap<String, Object>(); loadTestConfigMap.put("engineInstances", 1); testMap.put("loadTestConfiguration", loadTestConfigMap); Map<String, Object> envVarMap = new HashMap<String, Object>(); envVarMap.put("a", "b"); envVarMap.put("x", "y"); testMap.put("environmentVariables", envVarMap); Map<String, Object> secretMap = new HashMap<String, Object>(); Map<String, Object> sampleSecretMap = new HashMap<String, Object>(); sampleSecretMap.put("value", "https: sampleSecretMap.put("type", "AKV_SECRET_URI"); secretMap.put("sampleSecret", sampleSecretMap); testMap.put("secrets", secretMap); Map<String, Object> passFailMap = new HashMap<String, Object>(); Map<String, Object> passFailMetrics = new HashMap<String, Object>(); Map<String, Object> samplePassFailMetric = new HashMap<String, Object>(); samplePassFailMetric.put("clientmetric", "response_time_ms"); samplePassFailMetric.put("aggregate", "percentage"); samplePassFailMetric.put("condition", ">"); samplePassFailMetric.put("value", "20"); samplePassFailMetric.put("action", "continue"); passFailMetrics.put("fefd759d-7fe8-4f83-8b6d-aeebe0f491fe", samplePassFailMetric); passFailMap.put("passFailMetrics", passFailMetrics); testMap.put("passFailCriteria", passFailMap); BinaryData test = BinaryData.fromObject(testMap); Response<BinaryData> testOutResponse = adminClient.createOrUpdateTestWithResponse("test12345", test, null); System.out.println(testOutResponse.getValue().toString()); } public void uploadTestFile() throws IOException { LoadTestAdministrationClient adminClient = new 
LoadTestAdministrationClientBuilder() .credential(new DefaultAzureCredentialBuilder().build()) .endpoint("<endpoint>") .buildClient(); BinaryData fileData = BinaryData.fromFile(new File("path/to/file").toPath()); Response<BinaryData> fileUrlOut = adminClient.uploadTestFileWithResponse("test12345", "sample-file.jmx", fileData, null); System.out.println(fileUrlOut.getValue().toString()); } public void runTest() { LoadTestRunClient testRunClient = new LoadTestRunClientBuilder() .credential(new DefaultAzureCredentialBuilder().build()) .endpoint("<endpoint>") .buildClient(); Map<String, Object> testRunMap = new HashMap<String, Object>(); testRunMap.put("testId", "test12345"); testRunMap.put("displayName", "SDK-Created-TestRun"); BinaryData testRun = BinaryData.fromObject(testRunMap); SyncPoller<BinaryData, BinaryData> poller = testRunClient.beginTestRun("testrun12345", testRun, null); Duration pollInterval = Duration.ofSeconds(5); poller = poller.setPollInterval(pollInterval); JsonNode testRunJson = null; String testStatus; PollResponse<BinaryData> pollResponse = poller.poll(); while (pollResponse.getStatus() == LongRunningOperationStatus.IN_PROGRESS || pollResponse.getStatus() == LongRunningOperationStatus.NOT_STARTED) { try { testRunJson = new ObjectMapper().readTree(pollResponse.getValue().toString()); testStatus = testRunJson.get("status").asText(); System.out.println("Test run status: " + testStatus); } catch (JsonProcessingException e) { System.out.println("Error processing JSON response"); } try { Thread.sleep(pollInterval.toMillis()); } catch (InterruptedException e) { } pollResponse = poller.poll(); } poller.waitForCompletion(); BinaryData testRunBinary = poller.getFinalResult(); try { testRunJson = new ObjectMapper().readTree(testRunBinary.toString()); testStatus = testRunJson.get("status").asText(); } catch (JsonProcessingException e) { System.out.println("Error processing JSON response"); } String startDateTime = testRunJson.get("startDateTime").asText(); 
String endDateTime = testRunJson.get("endDateTime").asText(); Response<BinaryData> metricNamespacesOut = testRunClient.getMetricNamespacesWithResponse("testrun12345", null); String metricNamespace = null; try { JsonNode metricNamespacesJson = new ObjectMapper().readTree(metricNamespacesOut.getValue().toString()); metricNamespace = metricNamespacesJson.get("value").get(0).get("metricNamespaceName").asText(); } catch (JsonProcessingException e) { System.out.println("Error processing JSON response"); } Response<BinaryData> metricDefinitionsOut = testRunClient.getMetricDefinitionsWithResponse("testrun12345", metricNamespace, null); String metricName = null; try { JsonNode metricDefinitionsJson = new ObjectMapper().readTree(metricDefinitionsOut.getValue().toString()); metricName = metricDefinitionsJson.get("value").get(0).get("name").get("value").asText(); } catch (JsonProcessingException e) { System.out.println("Error processing JSON response"); } PagedIterable<BinaryData> clientMetricsOut = testRunClient.listMetrics("testrun12345", metricName, metricNamespace, startDateTime + '/' + endDateTime, null); clientMetricsOut.forEach((clientMetric) -> { System.out.println(clientMetric.toString()); }); } }
Done
public static void main(String[] args) { LoadTestAdministrationClientBuilder adminBuilder = new LoadTestAdministrationClientBuilder() .credential(new DefaultAzureCredentialBuilder().build()) .endpoint("<endpoint>"); LoadTestRunClientBuilder testRunBuilder = new LoadTestRunClientBuilder() .credential(new DefaultAzureCredentialBuilder().build()) .endpoint("<endpoint>"); LoadTestAdministrationClient adminClient = adminBuilder.buildClient(); LoadTestRunClient testRunClient = testRunBuilder.buildClient(); final String testId = "6758667a-a57c-47e5-9cef-9b1f1432daca"; final String testRunId = "f758667a-c5ac-269a-dce1-5c1f14f2d142"; final String testFileName = "test-script.jmx"; final String testFilePath = "C:/path/to/file/sample-script.jmx"; /* * BEGIN: Create test */ Map<String, Object> testMap = new HashMap<String, Object>(); testMap.put("displayName", "Sample Display Name"); testMap.put("description", "Sample Description"); Map<String, Object> loadTestConfigMap = new HashMap<String, Object>(); loadTestConfigMap.put("engineInstances", 1); testMap.put("loadTestConfiguration", loadTestConfigMap); Map<String, Object> envVarMap = new HashMap<String, Object>(); envVarMap.put("a", "b"); envVarMap.put("x", "y"); testMap.put("environmentVariables", envVarMap); Map<String, Object> secretMap = new HashMap<String, Object>(); Map<String, Object> sampleSecretMap = new HashMap<String, Object>(); sampleSecretMap.put("value", "https: sampleSecretMap.put("type", "AKV_SECRET_URI"); secretMap.put("sampleSecret", sampleSecretMap); testMap.put("secrets", secretMap); Map<String, Object> passFailMap = new HashMap<String, Object>(); Map<String, Object> passFailMetrics = new HashMap<String, Object>(); Map<String, Object> samplePassFailMetric = new HashMap<String, Object>(); samplePassFailMetric.put("clientmetric", "response_time_ms"); samplePassFailMetric.put("aggregate", "percentage"); samplePassFailMetric.put("condition", ">"); samplePassFailMetric.put("value", "20"); 
samplePassFailMetric.put("action", "continue"); passFailMetrics.put("fefd759d-7fe8-4f83-8b6d-aeebe0f491fe", samplePassFailMetric); passFailMap.put("passFailMetrics", passFailMetrics); testMap.put("passFailCriteria", passFailMap); BinaryData test = BinaryData.fromObject(testMap); Response<BinaryData> testOutResponse = adminClient.createOrUpdateTestWithResponse(testId, test, null); System.out.println(testOutResponse.getValue().toString()); /* * END: Create test */ /* * BEGIN: Upload test file */ BinaryData fileData = BinaryData.fromFile(new File(testFilePath).toPath()); PollResponse<BinaryData> fileUrlOut = adminClient.beginUploadTestFile(testId, testFileName, fileData, null).waitForCompletion(Duration.ofMinutes(2)); System.out.println(fileUrlOut.getValue().toString()); /* * END: Upload test file */ /* * BEGIN: Start test run */ Map<String, Object> testRunMap = new HashMap<String, Object>(); testRunMap.put("testId", testId); testRunMap.put("displayName", "SDK-Created-TestRun"); BinaryData testRun = BinaryData.fromObject(testRunMap); SyncPoller<BinaryData, BinaryData> testRunPoller = testRunClient.beginTestRun(testRunId, testRun, null); System.out.println(testRunPoller.poll().getValue().toString()); /* * END: Start test run */ /* * BEGIN: Stop test run */ try { Thread.sleep(10 * 1000); } catch (InterruptedException e) { } Response<BinaryData> stoppedTestRunOut = testRunClient.stopTestRunWithResponse(testRunId, null); System.out.println(stoppedTestRunOut.getValue().toString()); /* * END: Stop test run */ /* * BEGIN: List metrics */ PollResponse<BinaryData> testRunOut = testRunPoller.poll(); JsonNode testRunJson = null; String testStatus = null, startDateTime = null, endDateTime = null; while (!testRunOut.getStatus().isComplete()) { testRunOut = testRunPoller.poll(); try { testRunJson = new ObjectMapper().readTree(testRunOut.getValue().toString()); testStatus = testRunJson.get("status").asText(); System.out.println("Status of test run: " + testStatus); } catch 
(JsonProcessingException e) { e.printStackTrace(); } try { Thread.sleep(5 * 1000); } catch (InterruptedException e) { } } try { testRunJson = new ObjectMapper().readTree(testRunPoller.getFinalResult().toString()); startDateTime = testRunJson.get("startDateTime").asText(); endDateTime = testRunJson.get("endDateTime").asText(); } catch (JsonProcessingException e) { e.printStackTrace(); } Response<BinaryData> metricNamespacesOut = testRunClient.getMetricNamespacesWithResponse(testRunId, null); String metricNamespace = null; try { JsonNode metricNamespacesJson = new ObjectMapper().readTree(metricNamespacesOut.getValue().toString()); metricNamespace = metricNamespacesJson.get("value").get(0).get("metricNamespaceName").asText(); } catch (JsonProcessingException e) { e.printStackTrace(); } Response<BinaryData> metricDefinitionsOut = testRunClient.getMetricDefinitionsWithResponse(testRunId, metricNamespace, null); String metricName = null; try { JsonNode metricDefinitionsJson = new ObjectMapper().readTree(metricDefinitionsOut.getValue().toString()); metricName = metricDefinitionsJson.get("value").get(0).get("name").get("value").asText(); } catch (JsonProcessingException e) { e.printStackTrace(); } PagedIterable<BinaryData> clientMetricsOut = testRunClient.listMetrics(testRunId, metricName, metricNamespace, startDateTime + '/' + endDateTime, null); clientMetricsOut.forEach((clientMetric) -> { System.out.println(clientMetric.toString()); }); /* * END: List metrics */ }
LoadTestRunClient testRunClient = testRunBuilder.buildClient();
public static void main(String[] args) { LoadTestAdministrationClient adminClient = new LoadTestAdministrationClientBuilder() .credential(new DefaultAzureCredentialBuilder().build()) .endpoint("<endpoint>") .buildClient(); LoadTestRunClient testRunClient = new LoadTestRunClientBuilder() .credential(new DefaultAzureCredentialBuilder().build()) .endpoint("<endpoint>") .buildClient(); final String testId = "6758667a-a57c-47e5-9cef-9b1f1432daca"; final String testRunId = "f758667a-c5ac-269a-dce1-5c1f14f2d142"; final String testFileName = "test-script.jmx"; final String testFilePath = "C:/path/to/file/sample-script.jmx"; /* * BEGIN: Create test */ Map<String, Object> testMap = new HashMap<String, Object>(); testMap.put("displayName", "Sample Display Name"); testMap.put("description", "Sample Description"); Map<String, Object> loadTestConfigMap = new HashMap<String, Object>(); loadTestConfigMap.put("engineInstances", 1); testMap.put("loadTestConfiguration", loadTestConfigMap); Map<String, Object> envVarMap = new HashMap<String, Object>(); envVarMap.put("a", "b"); envVarMap.put("x", "y"); testMap.put("environmentVariables", envVarMap); Map<String, Object> secretMap = new HashMap<String, Object>(); Map<String, Object> sampleSecretMap = new HashMap<String, Object>(); sampleSecretMap.put("value", "https: sampleSecretMap.put("type", "AKV_SECRET_URI"); secretMap.put("sampleSecret", sampleSecretMap); testMap.put("secrets", secretMap); Map<String, Object> passFailMap = new HashMap<String, Object>(); Map<String, Object> passFailMetrics = new HashMap<String, Object>(); Map<String, Object> samplePassFailMetric = new HashMap<String, Object>(); samplePassFailMetric.put("clientmetric", "response_time_ms"); samplePassFailMetric.put("aggregate", "percentage"); samplePassFailMetric.put("condition", ">"); samplePassFailMetric.put("value", "20"); samplePassFailMetric.put("action", "continue"); passFailMetrics.put("fefd759d-7fe8-4f83-8b6d-aeebe0f491fe", samplePassFailMetric); 
passFailMap.put("passFailMetrics", passFailMetrics); testMap.put("passFailCriteria", passFailMap); BinaryData test = BinaryData.fromObject(testMap); Response<BinaryData> testOutResponse = adminClient.createOrUpdateTestWithResponse(testId, test, null); System.out.println(testOutResponse.getValue().toString()); /* * END: Create test */ /* * BEGIN: Upload test file */ BinaryData fileData = BinaryData.fromFile(new File(testFilePath).toPath()); PollResponse<BinaryData> fileUrlOut = adminClient.beginUploadTestFile(testId, testFileName, fileData, null).waitForCompletion(Duration.ofMinutes(2)); System.out.println(fileUrlOut.getValue().toString()); /* * END: Upload test file */ /* * BEGIN: Start test run */ Map<String, Object> testRunMap = new HashMap<String, Object>(); testRunMap.put("testId", testId); testRunMap.put("displayName", "SDK-Created-TestRun"); BinaryData testRun = BinaryData.fromObject(testRunMap); SyncPoller<BinaryData, BinaryData> testRunPoller = testRunClient.beginTestRun(testRunId, testRun, null); System.out.println(testRunPoller.poll().getValue().toString()); /* * END: Start test run */ /* * BEGIN: Stop test run */ try { Thread.sleep(10 * 1000); } catch (InterruptedException e) { } Response<BinaryData> stoppedTestRunOut = testRunClient.stopTestRunWithResponse(testRunId, null); System.out.println(stoppedTestRunOut.getValue().toString()); /* * END: Stop test run */ /* * BEGIN: List metrics */ PollResponse<BinaryData> testRunOut = testRunPoller.poll(); JsonNode testRunJson = null; String testStatus = null, startDateTime = null, endDateTime = null; while (!testRunOut.getStatus().isComplete()) { testRunOut = testRunPoller.poll(); try { testRunJson = new ObjectMapper().readTree(testRunOut.getValue().toString()); testStatus = testRunJson.get("status").asText(); System.out.println("Status of test run: " + testStatus); } catch (JsonProcessingException e) { e.printStackTrace(); } try { Thread.sleep(5 * 1000); } catch (InterruptedException e) { } } try { testRunJson 
= new ObjectMapper().readTree(testRunPoller.getFinalResult().toString()); startDateTime = testRunJson.get("startDateTime").asText(); endDateTime = testRunJson.get("endDateTime").asText(); } catch (JsonProcessingException e) { e.printStackTrace(); } Response<BinaryData> metricNamespacesOut = testRunClient.getMetricNamespacesWithResponse(testRunId, null); String metricNamespace = null; try { JsonNode metricNamespacesJson = new ObjectMapper().readTree(metricNamespacesOut.getValue().toString()); metricNamespace = metricNamespacesJson.get("value").get(0).get("metricNamespaceName").asText(); } catch (JsonProcessingException e) { e.printStackTrace(); } Response<BinaryData> metricDefinitionsOut = testRunClient.getMetricDefinitionsWithResponse(testRunId, metricNamespace, null); String metricName = null; try { JsonNode metricDefinitionsJson = new ObjectMapper().readTree(metricDefinitionsOut.getValue().toString()); metricName = metricDefinitionsJson.get("value").get(0).get("name").get("value").asText(); } catch (JsonProcessingException e) { e.printStackTrace(); } PagedIterable<BinaryData> clientMetricsOut = testRunClient.listMetrics(testRunId, metricName, metricNamespace, startDateTime + '/' + endDateTime, null); clientMetricsOut.forEach((clientMetric) -> { System.out.println(clientMetric.toString()); }); /* * END: List metrics */ }
/**
 * Sample demonstrating basic interaction with an Azure Load Testing resource.
 */
class HelloWorld {
    /**
     * Authenticates with the load testing resource and shows how to list tests, test files and test runs
     * for a given resource.
     *
     * @param args Unused. Arguments to the program.
     *
     * @throws ClientAuthenticationException when the credentials have insufficient permissions for the load test resource.
     * @throws ResourceNotFoundException when the test with {@code testId} does not exist when listing files.
     */
}
/**
 * Sample demonstrating basic interaction with an Azure Load Testing resource.
 */
class HelloWorld {
    /**
     * Authenticates with the load testing resource and shows how to list tests, test files and test runs
     * for a given resource.
     *
     * @param args Unused. Arguments to the program.
     *
     * @throws ClientAuthenticationException when the credentials have insufficient permissions for the load test resource.
     * @throws ResourceNotFoundException when the test with {@code testId} does not exist when listing files.
     */
}
It should be OK to use the actual operator type names when documenting their internal behavior, as these comments are for developer reference. Something like - _The re-subscribe nature of 'MonoRepeat' following the emission of a link will cause the 'MonoFlatMap' (the upstream of the repeat operator) to cache the same link. When AmqpReceiveLinkProcessor later requests a new link, the corresponding request from the 'MonoRepeat' will be answered with the cached (and closed) link by the 'MonoFlatMap'. We'll filter out these cached (closed) links to prevent AmqpReceiveLinkProcessor from doing unnecessary work (creating subscriptions and attempting to place the credit) on those links, and to avoid the associated logging. See the PR description "https://github.com/Azure/azure-sdk-for-java/pull/33204" for more details_.
/**
 * Returns the existing {@link ServiceBusAsyncConsumer} or, if none exists yet,
 * atomically creates one backed by a retried, repeating receive-link pipeline.
 */
private ServiceBusAsyncConsumer getOrCreateConsumer() {
    final ServiceBusAsyncConsumer existing = consumer.get();
    if (existing != null) {
        return existing;
    }

    final String linkName = StringUtil.getRandomString(entityPath);
    LOGGER.atInfo()
        .addKeyValue(LINK_NAME_KEY, linkName)
        .addKeyValue(ENTITY_PATH_KEY, entityPath)
        .log("Creating consumer.");

    // Creates a receive link on the active connection; the session-aware overload is used
    // when this receiver was configured as a session receiver.
    final Mono<ServiceBusReceiveLink> receiveLinkMono = connectionProcessor.flatMap(connection -> {
        if (receiverOptions.isSessionReceiver()) {
            return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType, identifier, receiverOptions.getSessionId());
        } else {
            return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType, identifier);
        }
    }).doOnNext(next -> {
        LOGGER.atVerbose()
            .addKeyValue(LINK_NAME_KEY, linkName)
            .addKeyValue(ENTITY_PATH_KEY, next.getEntityPath())
            .addKeyValue("mode", receiverOptions.getReceiveMode())
            .addKeyValue("isSessionEnabled", receiverOptions.isSessionReceiver())
            .addKeyValue(ENTITY_TYPE_KEY, entityType)
            .log("Created consumer for Service Bus resource.");
    });

    // A RequestResponseChannelClosedException during link creation is treated as transient:
    // map it to a retriable AmqpException so the retry below can recover from it.
    final Mono<ServiceBusReceiveLink> retryableReceiveLinkMono = RetryUtil.withRetry(receiveLinkMono.onErrorMap(
        RequestResponseChannelClosedException.class,
        e -> {
            return new AmqpException(true, e.getMessage(), e, null);
        }), connectionProcessor.getRetryOptions(), "Failed to create receive link " + linkName, true);

    // The re-subscribe nature of 'MonoRepeat' following the emission of a link will cause the
    // 'MonoFlatMap' (the upstream of the repeat operator) to cache the same link. When the
    // downstream link processor later requests a new link, the corresponding request from
    // 'MonoRepeat' will be answered with the cached (and closed) link by the 'MonoFlatMap'.
    // Filter out these cached (closed) links to prevent the processor from doing unnecessary
    // work (creating subscriptions and attempting to place credits) on them and to avoid the
    // associated logging. See https://github.com/Azure/azure-sdk-for-java/pull/33204.
    final Flux<ServiceBusReceiveLink> receiveLinkFlux = retryableReceiveLinkMono
        .repeat()
        .filter(link -> link != null && !link.isDisposed());

    final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions());
    final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLinkFlux.subscribeWith(
        new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy));
    final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor,
        messageSerializer, receiverOptions);

    // Publish the new consumer atomically; if another thread won the race, dispose ours and
    // return the winner's instance.
    if (consumer.compareAndSet(null, newConsumer)) {
        return newConsumer;
    } else {
        newConsumer.close();
        return consumer.get();
    }
}
/**
 * Returns the existing {@link ServiceBusAsyncConsumer} or, if none exists yet,
 * atomically creates one backed by a retried, repeating receive-link pipeline.
 */
private ServiceBusAsyncConsumer getOrCreateConsumer() {
    final ServiceBusAsyncConsumer existing = consumer.get();
    if (existing != null) {
        return existing;
    }

    final String linkName = StringUtil.getRandomString(entityPath);
    LOGGER.atInfo()
        .addKeyValue(LINK_NAME_KEY, linkName)
        .addKeyValue(ENTITY_PATH_KEY, entityPath)
        .log("Creating consumer.");

    // Creates a receive link on the active connection; the session-aware overload is used
    // when this receiver was configured as a session receiver.
    final Mono<ServiceBusReceiveLink> receiveLinkMono = connectionProcessor.flatMap(connection -> {
        if (receiverOptions.isSessionReceiver()) {
            return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType, identifier, receiverOptions.getSessionId());
        } else {
            return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType, identifier);
        }
    }).doOnNext(next -> {
        LOGGER.atVerbose()
            .addKeyValue(LINK_NAME_KEY, linkName)
            .addKeyValue(ENTITY_PATH_KEY, next.getEntityPath())
            .addKeyValue("mode", receiverOptions.getReceiveMode())
            .addKeyValue("isSessionEnabled", receiverOptions.isSessionReceiver())
            .addKeyValue(ENTITY_TYPE_KEY, entityType)
            .log("Created consumer for Service Bus resource.");
    });

    // A RequestResponseChannelClosedException during link creation is treated as transient:
    // map it to a retriable AmqpException so the retry below can recover from it.
    final Mono<ServiceBusReceiveLink> retryableReceiveLinkMono = RetryUtil.withRetry(receiveLinkMono.onErrorMap(
        RequestResponseChannelClosedException.class,
        e -> {
            return new AmqpException(true, e.getMessage(), e, null);
        }), connectionProcessor.getRetryOptions(), "Failed to create receive link " + linkName, true);

    // The re-subscribe nature of 'MonoRepeat' following the emission of a link will cause the
    // 'MonoFlatMap' (the upstream of the repeat operator) to cache the same link. When the
    // downstream link processor later requests a new link, the corresponding request from
    // 'MonoRepeat' will be answered with the cached (and closed) link by the 'MonoFlatMap'.
    // Filter out these cached (closed) links to prevent the processor from doing unnecessary
    // work (creating subscriptions and attempting to place credits) on them and to avoid the
    // associated logging. See https://github.com/Azure/azure-sdk-for-java/pull/33204.
    final Flux<ServiceBusReceiveLink> receiveLinkFlux = retryableReceiveLinkMono
        .repeat()
        .filter(link -> !link.isDisposed());

    final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions());
    final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLinkFlux.subscribeWith(
        new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy));
    final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor,
        messageSerializer, receiverOptions);

    // Publish the new consumer atomically; if another thread won the race, dispose ours and
    // return the winner's instance.
    if (consumer.compareAndSet(null, newConsumer)) {
        return newConsumer;
    } else {
        newConsumer.close();
        return consumer.get();
    }
}
class ServiceBusReceiverAsyncClient implements AutoCloseable {
    private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions();
    private static final String TRANSACTION_LINK_NAME = "coordinator";
    private static final ClientLogger LOGGER = new ClientLogger(ServiceBusReceiverAsyncClient.class);

    // Holds in-flight lock renewal operations so expired ones can be closed and logged.
    private final LockContainer<LockRenewalOperation> renewalContainer;
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    // Lock tokens acquired through the management node, mapped to their lock-expiry times.
    private final LockContainer<OffsetDateTime> managementNodeLocks;
    private final String fullyQualifiedNamespace;
    private final String entityPath;
    private final MessagingEntityType entityType;
    private final ReceiverOptions receiverOptions;
    private final ServiceBusConnectionProcessor connectionProcessor;
    private final ServiceBusReceiverInstrumentation instrumentation;
    private final ServiceBusTracer tracer;
    private final MessageSerializer messageSerializer;
    private final Runnable onClientClose;
    // Non-null only when constructed via the session-enabled constructor; otherwise null.
    private final ServiceBusSessionManager sessionManager;
    // Serializes auto-complete/abandon settlement (see FluxAutoComplete usage).
    private final Semaphore completionLock = new Semaphore(1);
    private final String identifier;
    // Highest sequence number seen by peek operations; -1 until the first peek.
    private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1);
    // Lazily created, shared consumer (see getOrCreateConsumer); published via CAS.
    private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>();
    private final AutoCloseable trackSettlementSequenceNumber;

    /**
     * Creates a receiver that listens to a Service Bus resource.
     *
     * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource.
     * @param entityPath The name of the topic or queue.
     * @param entityType The type of the Service Bus resource.
     * @param receiverOptions Options when receiving messages.
     * @param connectionProcessor The AMQP connection to the Service Bus resource.
     * @param instrumentation ServiceBus tracing and metrics helper
     * @param messageSerializer Serializes and deserializes Service Bus messages.
     * @param onClientClose Operation to run when the client completes.
     */
    ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
        ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
        ServiceBusReceiverInstrumentation instrumentation, MessageSerializer messageSerializer,
        Runnable onClientClose, String identifier) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
        this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
        this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
        this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
            "'connectionProcessor' cannot be null.");
        this.instrumentation = Objects.requireNonNull(instrumentation, "'tracer' cannot be null");
        this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
        this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
        this.managementNodeLocks = new LockContainer<>(cleanupInterval);
        // Renewal operations are evicted after 2 minutes; expired ones are logged and closed.
        this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> {
            LOGGER.atVerbose()
                .addKeyValue(LOCK_TOKEN_KEY, renewal.getLockToken())
                .addKeyValue("status", renewal.getStatus())
                .log("Closing expired renewal operation.", renewal.getThrowable());
            renewal.close();
        });
        // Non-session receiver: no session manager.
        this.sessionManager = null;
        this.identifier = identifier;
        this.tracer = instrumentation.getTracer();
        this.trackSettlementSequenceNumber = instrumentation.startTrackingSettlementSequenceNumber();
    }

    // Session-enabled constructor; the client identifier comes from the session manager.
    ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
        ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
        ServiceBusReceiverInstrumentation instrumentation,
        MessageSerializer messageSerializer, Runnable onClientClose, ServiceBusSessionManager sessionManager) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
        this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
        this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
        this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
            "'connectionProcessor' cannot be null.");
        this.instrumentation = Objects.requireNonNull(instrumentation, "'tracer' cannot be null");
        this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
        this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
        this.sessionManager = Objects.requireNonNull(sessionManager, "'sessionManager' cannot be null.");
        this.managementNodeLocks = new LockContainer<>(cleanupInterval);
        this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> {
            LOGGER.atInfo()
                .addKeyValue(SESSION_ID_KEY, renewal.getSessionId())
                .addKeyValue("status", renewal.getStatus())
                .log("Closing expired renewal operation.", renewal.getThrowable());
            renewal.close();
        });
        this.identifier = sessionManager.getIdentifier();
        this.tracer = instrumentation.getTracer();
        this.trackSettlementSequenceNumber = instrumentation.startTrackingSettlementSequenceNumber();
    }

    /**
     * Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to
     * {@code {yournamespace}.servicebus.windows.net}.
     *
     * @return The fully qualified Service Bus namespace that the connection is associated with.
     */
    public String getFullyQualifiedNamespace() {
        return fullyQualifiedNamespace;
    }

    /**
     * Gets the Service Bus resource this client interacts with.
* * @return The Service Bus resource this client interacts with. */ public String getEntityPath() { return entityPath; } /** * Gets the SessionId of the session if this receiver is a session receiver. * * @return The SessionId or null if this is not a session receiver. */ public String getSessionId() { return receiverOptions.getSessionId(); } /** * Gets the identifier of the instance of {@link ServiceBusReceiverAsyncClient}. * * @return The identifier that can identify the instance of {@link ServiceBusReceiverAsyncClient}. */ public String getIdentifier() { return identifier; } /** * Abandons a {@link ServiceBusReceivedMessage message}. This will make the message available again for processing. * Abandoning a message will increase the delivery count on the message. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that completes when the Service Bus abandon operation completes. * * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * {@link ServiceBusReceiverAsyncClient * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the message could not be abandoned. * @throws IllegalArgumentException if the message has either been deleted or already settled. */ public Mono<Void> abandon(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.ABANDONED, null, null, null, null); } /** * Abandons a {@link ServiceBusReceivedMessage message} updates the message's properties. This will make the * message available again for processing. Abandoning a message will increase the delivery count on the message. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options The options to set while abandoning the message. * * @return A {@link Mono} that completes when the Service Bus operation finishes. 
* * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. * @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * {@link ServiceBusReceiverAsyncClient * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the message could not be abandoned. * @throws IllegalArgumentException if the message has either been deleted or already settled. */ public Mono<Void> abandon(ServiceBusReceivedMessage message, AbandonOptions options) { if (Objects.isNull(options)) { return monoError(LOGGER, new NullPointerException("'settlementOptions' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(LOGGER, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.ABANDONED, null, null, options.getPropertiesToModify(), options.getTransactionContext()); } /** * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the service. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that finishes when the message is completed on Service Bus. * * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * {@link ServiceBusReceiverAsyncClient * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the message could not be completed. * @throws IllegalArgumentException if the message has either been deleted or already settled. 
     */
    public Mono<Void> complete(ServiceBusReceivedMessage message) {
        return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, null);
    }

    /**
     * Completes a {@link ServiceBusReceivedMessage message} with the given options. This will delete the message from
     * the service.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param options Options used to complete the message.
     *
     * @return A {@link Mono} that finishes when the message is completed on Service Bus.
     *
     * @throws NullPointerException if {@code message} or {@code options} is null. Also if
     * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
     * @throws UnsupportedOperationException if the receiver was opened in
     * {@link ServiceBusReceiveMode
     * {@link ServiceBusReceiverAsyncClient
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be completed.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
     */
    public Mono<Void> complete(ServiceBusReceivedMessage message, CompleteOptions options) {
        // Validate options up-front; a transaction context without an id is unusable.
        if (Objects.isNull(options)) {
            return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
        } else if (!Objects.isNull(options.getTransactionContext())
            && Objects.isNull(options.getTransactionContext().getTransactionId())) {
            return monoError(LOGGER, new NullPointerException(
                "'options.transactionContext.transactionId' cannot be null."));
        }
        return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null,
            options.getTransactionContext());
    }

    /**
     * Defers a {@link ServiceBusReceivedMessage message}. This will move message into the deferred sub-queue.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     *
     * @return A {@link Mono} that completes when the Service Bus defer operation finishes.
     *
     * @throws NullPointerException if {@code message} is null.
     * @throws UnsupportedOperationException if the receiver was opened in
     * {@link ServiceBusReceiveMode
     * {@link ServiceBusReceiverAsyncClient
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be deferred.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
     * @see <a href="https:
     */
    public Mono<Void> defer(ServiceBusReceivedMessage message) {
        return updateDisposition(message, DispositionStatus.DEFERRED, null, null, null, null);
    }

    /**
     * Defers a {@link ServiceBusReceivedMessage message} with the options set. This will move message into
     * the deferred sub-queue.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param options Options used to defer the message.
     *
     * @return A {@link Mono} that completes when the defer operation finishes.
     *
     * @throws NullPointerException if {@code message} or {@code options} is null. Also if
     * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
     * @throws UnsupportedOperationException if the receiver was opened in
     * {@link ServiceBusReceiveMode
     * {@link ServiceBusReceiverAsyncClient
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be deferred.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
* @see <a href="https: */ public Mono<Void> defer(ServiceBusReceivedMessage message, DeferOptions options) { if (Objects.isNull(options)) { return monoError(LOGGER, new NullPointerException("'options' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(LOGGER, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.DEFERRED, null, null, options.getPropertiesToModify(), options.getTransactionContext()); } /** * Moves a {@link ServiceBusReceivedMessage message} to the dead-letter sub-queue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that completes when the dead letter operation finishes. * * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * {@link ServiceBusReceiverAsyncClient * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the message could not be dead-lettered. * @throws IllegalArgumentException if the message has either been deleted or already settled. * * @see <a href="https: * queues</a> */ public Mono<Void> deadLetter(ServiceBusReceivedMessage message) { return deadLetter(message, DEFAULT_DEAD_LETTER_OPTIONS); } /** * Moves a {@link ServiceBusReceivedMessage message} to the dead-letter sub-queue with the given options. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options Options used to dead-letter the message. * * @return A {@link Mono} that completes when the dead letter operation finishes. * * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. 
* @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * {@link ServiceBusReceiverAsyncClient * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the message could not be dead-lettered. * @throws IllegalArgumentException if the message has either been deleted or already settled. * * @see <a href="https: * queues</a> */ public Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions options) { if (Objects.isNull(options)) { return monoError(LOGGER, new NullPointerException("'options' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(LOGGER, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.SUSPENDED, options.getDeadLetterReason(), options.getDeadLetterErrorDescription(), options.getPropertiesToModify(), options.getTransactionContext()); } /** * Gets the state of the session if this receiver is a session receiver. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver or receiver is already closed. * @throws ServiceBusException if the session state could not be acquired. */ public Mono<byte[]> getSessionState() { return getSessionState(receiverOptions.getSessionId()); } /** * Reads the next active message without changing the state of the receiver or the message source. The first call to * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent * message in the entity. * * @return A peeked {@link ServiceBusReceivedMessage}. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if an error occurs while peeking at the message. 
* @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peekMessage() { return peekMessage(receiverOptions.getSessionId()); } /** * Reads the next active message without changing the state of the receiver or the message source. The first call to * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent * message in the entity. * * @param sessionId Session id of the message to peek from. {@code null} if there is no session. * * @return A peeked {@link ServiceBusReceivedMessage}. * @throws IllegalStateException if the receiver is disposed. * @throws ServiceBusException if an error occurs while peeking at the message. * @see <a href="https: */ Mono<ServiceBusReceivedMessage> peekMessage(String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek"))); } Mono<ServiceBusReceivedMessage> result = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> { final long sequence = lastPeekedSequenceNumber.get() + 1; LOGGER.atVerbose() .addKeyValue(SEQUENCE_NUMBER_KEY, sequence) .log("Peek message."); return channel.peek(sequence, sessionId, getLinkName(sessionId)); }) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)) .handle((message, sink) -> { final long current = lastPeekedSequenceNumber .updateAndGet(value -> Math.max(value, message.getSequenceNumber())); LOGGER.atVerbose() .addKeyValue(SEQUENCE_NUMBER_KEY, current) .log("Updating last peeked sequence number."); sink.next(message); }); return tracer.traceManagementReceive("ServiceBus.peekMessage", result, ServiceBusReceivedMessage::getContext); } /** * Starting from the given sequence number, reads next the active message without changing the state of the receiver * or the message source. * * @param sequenceNumber The sequence number from where to read the message. 
* * @return A peeked {@link ServiceBusReceivedMessage}. * * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if an error occurs while peeking at the message. * @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peekMessage(long sequenceNumber) { return peekMessage(sequenceNumber, receiverOptions.getSessionId()); } /** * Starting from the given sequence number, reads next the active message without changing the state of the receiver * or the message source. * * @param sequenceNumber The sequence number from where to read the message. * @param sessionId Session id of the message to peek from. {@code null} if there is no session. * * @return A peeked {@link ServiceBusReceivedMessage}. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if an error occurs while peeking at the message. * @see <a href="https: */ Mono<ServiceBusReceivedMessage> peekMessage(long sequenceNumber, String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt"))); } return tracer.traceManagementReceive("ServiceBus.peekMessage", connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)), ServiceBusReceivedMessage::getContext); } /** * Reads the next batch of active messages without changing the state of the receiver or the message source. * * @param maxMessages The number of messages. * * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked. * * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if an error occurs while peeking at messages. 
* @see <a href="https: */ public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages) { return tracer.traceSyncReceive("ServiceBus.peekMessages", peekMessages(maxMessages, receiverOptions.getSessionId())); } /** * Reads the next batch of active messages without changing the state of the receiver or the message source. * * @param maxMessages The number of messages. * @param sessionId Session id of the messages to peek from. {@code null} if there is no session. * * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage messages} that are peeked. * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if an error occurs while peeking at messages. * @see <a href="https: */ Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, String sessionId) { if (isDisposed.get()) { return fluxError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch"))); } if (maxMessages <= 0) { return fluxError(LOGGER, new IllegalArgumentException("'maxMessages' is not positive.")); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMapMany(node -> { final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1; LOGGER.atVerbose().addKeyValue(SEQUENCE_NUMBER_KEY, nextSequenceNumber).log("Peek batch."); final Flux<ServiceBusReceivedMessage> messages = node.peek(nextSequenceNumber, sessionId, getLinkName(sessionId), maxMessages); final Mono<ServiceBusReceivedMessage> handle = messages .switchIfEmpty(Mono.fromCallable(() -> { ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(BinaryData .fromBytes(new byte[0])); emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get()); return emptyMessage; })) .last() .handle((last, sink) -> { final long current = lastPeekedSequenceNumber .updateAndGet(value -> Math.max(value, 
last.getSequenceNumber())); LOGGER.atVerbose().addKeyValue(SEQUENCE_NUMBER_KEY, current).log("Last peeked sequence number in batch."); sink.complete(); }); return Flux.merge(messages, handle); }) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Starting from the given sequence number, reads the next batch of active messages without changing the state of * the receiver or the message source. * * @param maxMessages The number of messages. * @param sequenceNumber The sequence number from where to start reading messages. * * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} peeked. * * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if an error occurs while peeking at messages. * @see <a href="https: */ public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, long sequenceNumber) { return peekMessages(maxMessages, sequenceNumber, receiverOptions.getSessionId()); } /** * Starting from the given sequence number, reads the next batch of active messages without changing the state of * the receiver or the message source. * * @param maxMessages The number of messages. * @param sequenceNumber The sequence number from where to start reading messages. * @param sessionId Session id of the messages to peek from. {@code null} if there is no session. * * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage} peeked. * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if an error occurs while peeking at messages. 
* @see <a href="https: */ Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, long sequenceNumber, String sessionId) { if (isDisposed.get()) { return fluxError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt"))); } if (maxMessages <= 0) { return fluxError(LOGGER, new IllegalArgumentException("'maxMessages' is not positive.")); } return tracer.traceSyncReceive("ServiceBus.peekMessages", connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMapMany(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages)) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE))); } /** * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity. * This Flux continuously receives messages from a Service Bus entity until either: * * <ul> * <li>The receiver is closed.</li> * <li>The subscription to the Flux is disposed.</li> * <li>A terminal signal from a downstream subscriber is propagated upstream (ie. {@link Flux * {@link Flux * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li> * </ul> * * @return An <b>infinite</b> stream of messages from the Service Bus entity. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if an error occurs while receiving messages. 
*/ public Flux<ServiceBusReceivedMessage> receiveMessages() { if (isDisposed.get()) { return fluxError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveMessages"))); } return receiveMessagesNoBackPressure().limitRate(1, 0); } @SuppressWarnings("try") Flux<ServiceBusReceivedMessage> receiveMessagesNoBackPressure() { return receiveMessagesWithContext(0) .handle((serviceBusMessageContext, sink) -> { try (AutoCloseable scope = tracer.makeSpanCurrent(serviceBusMessageContext.getMessage().getContext())) { if (serviceBusMessageContext.hasError()) { sink.error(serviceBusMessageContext.getThrowable()); return; } sink.next(serviceBusMessageContext.getMessage()); } catch (Exception ex) { LOGGER.verbose("Error disposing scope", ex); } }); } /** * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity. * This Flux continuously receives messages from a Service Bus entity until either: * * <ul> * <li>The receiver is closed.</li> * <li>The subscription to the Flux is disposed.</li> * <li>A terminal signal from a downstream subscriber is propagated upstream (ie. {@link Flux * {@link Flux * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li> * </ul> * * @return An <b>infinite</b> stream of messages from the Service Bus entity. */ Flux<ServiceBusMessageContext> receiveMessagesWithContext() { return receiveMessagesWithContext(1); } Flux<ServiceBusMessageContext> receiveMessagesWithContext(int highTide) { final Flux<ServiceBusMessageContext> messageFlux = sessionManager != null ? 
sessionManager.receive() : getOrCreateConsumer().receive().map(ServiceBusMessageContext::new); final Flux<ServiceBusMessageContext> messageFluxWithTracing = new FluxTrace(messageFlux, instrumentation); final Flux<ServiceBusMessageContext> withAutoLockRenewal; if (!receiverOptions.isSessionReceiver() && receiverOptions.isAutoLockRenewEnabled()) { withAutoLockRenewal = new FluxAutoLockRenew(messageFluxWithTracing, receiverOptions, renewalContainer, this::renewMessageLock); } else { withAutoLockRenewal = messageFluxWithTracing; } Flux<ServiceBusMessageContext> result; if (receiverOptions.isEnableAutoComplete()) { result = new FluxAutoComplete(withAutoLockRenewal, completionLock, context -> context.getMessage() != null ? complete(context.getMessage()) : Mono.empty(), context -> context.getMessage() != null ? abandon(context.getMessage()) : Mono.empty()); } else { result = withAutoLockRenewal; } if (highTide > 0) { result = result.limitRate(highTide, 0); } return result .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using * sequence number. * * @param sequenceNumber The {@link ServiceBusReceivedMessage * message. * * @return A deferred message with the matching {@code sequenceNumber}. * * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if deferred message cannot be received. */ public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) { return receiveDeferredMessage(sequenceNumber, receiverOptions.getSessionId()); } /** * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using * sequence number. * * @param sequenceNumber The {@link ServiceBusReceivedMessage * message. * @param sessionId Session id of the deferred message. {@code null} if there is no session. 
     *
     * @return A deferred message with the matching {@code sequenceNumber}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if deferred message cannot be received.
     */
    Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessage")));
        }
        return tracer.traceManagementReceive("ServiceBus.receiveDeferredMessage",
            connectionProcessor
                .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
                .flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(),
                    sessionId, getLinkName(sessionId), Collections.singleton(sequenceNumber)).last())
                .map(receivedMessage -> {
                    if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                        return receivedMessage;
                    }
                    // In PEEK_LOCK mode, remember the lock's expiry so it can be renewed/settled later.
                    if (receiverOptions.getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) {
                        receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                            receivedMessage.getLockedUntil(), receivedMessage.getLockedUntil()));
                    }
                    return receivedMessage;
                })
                .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)),
            ServiceBusReceivedMessage::getContext);
    }

    /**
     * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
     * by using sequence number.
     *
     * @param sequenceNumbers The sequence numbers of the deferred messages.
     *
     * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}.
     *
     * @throws NullPointerException if {@code sequenceNumbers} is null.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if deferred messages cannot be received.
     */
    public Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers) {
        return tracer.traceSyncReceive("ServiceBus.receiveDeferredMessages",
            receiveDeferredMessages(sequenceNumbers, receiverOptions.getSessionId()));
    }

    /**
     * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
     * by using sequence number.
     *
     * @param sequenceNumbers The sequence numbers of the deferred messages.
     * @param sessionId Session id of the deferred messages. {@code null} if there is no session.
     *
     * @return An {@link IterableStream} of deferred {@link ServiceBusReceivedMessage messages}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws NullPointerException if {@code sequenceNumbers} is null.
     * @throws ServiceBusException if deferred message cannot be received.
     */
    Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers, String sessionId) {
        if (isDisposed.get()) {
            return fluxError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch")));
        }
        if (sequenceNumbers == null) {
            return fluxError(LOGGER, new NullPointerException("'sequenceNumbers' cannot be null"));
        }
        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(),
                sessionId, getLinkName(sessionId), sequenceNumbers))
            .map(receivedMessage -> {
                if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                    return receivedMessage;
                }
                // In PEEK_LOCK mode, remember the lock's expiry so it can be renewed/settled later.
                if (receiverOptions.getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) {
                    receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                        receivedMessage.getLockedUntil(), receivedMessage.getLockedUntil()));
                }
                return receivedMessage;
            })
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
    }

    /**
     * Package-private method that releases a
message. * * @param message Message to release. * @return Mono that completes when message is successfully released. */ Mono<Void> release(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.RELEASED, null, null, null, null); } /** * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the * entity. When a message is received in {@link ServiceBusReceiveMode * server for this receiver instance for a duration as specified during the entity creation (LockDuration). If * processing of the message requires longer than this duration, the lock needs to be renewed. For each renewal, the * lock is reset to the entity's LockDuration value. * * @param message The {@link ServiceBusReceivedMessage} to perform auto-lock renewal. * * @return The new expiration time for the message. * * @throws NullPointerException if {@code message} or {@code message.getLockToken()} is null. * @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * @throws IllegalStateException if the receiver is a session receiver or receiver is already disposed. * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value. 
*/ public Mono<OffsetDateTime> renewMessageLock(ServiceBusReceivedMessage message) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock"))); } else if (Objects.isNull(message)) { return monoError(LOGGER, new NullPointerException("'message' cannot be null.")); } else if (Objects.isNull(message.getLockToken())) { return monoError(LOGGER, new NullPointerException("'message.getLockToken()' cannot be null.")); } else if (message.getLockToken().isEmpty()) { return monoError(LOGGER, new IllegalArgumentException("'message.getLockToken()' cannot be empty.")); } else if (receiverOptions.isSessionReceiver()) { final String errorMessage = "Renewing message lock is an invalid operation when working with sessions."; return monoError(LOGGER, new IllegalStateException(errorMessage)); } return tracer.traceMonoWithLink("ServiceBus.renewMessageLock", renewMessageLock(message.getLockToken()), message, message.getContext()) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } /** * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the * entity. * * @param lockToken to be renewed. * * @return The new expiration time for the message. * @throws IllegalStateException if receiver is already disposed. */ Mono<OffsetDateTime> renewMessageLock(String lockToken) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(serviceBusManagementNode -> serviceBusManagementNode.renewMessageLock(lockToken, getLinkName(null))) .map(offsetDateTime -> managementNodeLocks.addOrUpdate(lockToken, offsetDateTime, offsetDateTime)); } /** * Starts the auto lock renewal for a {@link ServiceBusReceivedMessage message}. 
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 * @param maxLockRenewalDuration Maximum duration to keep renewing the lock token.
 *
 * @return A Mono that completes when the message renewal operation has completed up until
 *     {@code maxLockRenewalDuration}.
 *
 * @throws NullPointerException if {@code message}, {@code message.getLockToken()}, or
 *     {@code maxLockRenewalDuration} is null.
 * @throws IllegalStateException if the receiver is a session receiver or the receiver is disposed.
 * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value.
 * @throws ServiceBusException If the message lock cannot be renewed.
 */
public Mono<Void> renewMessageLock(ServiceBusReceivedMessage message, Duration maxLockRenewalDuration) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewMessageLock")));
    } else if (Objects.isNull(message)) {
        return monoError(LOGGER, new NullPointerException("'message' cannot be null."));
    } else if (Objects.isNull(message.getLockToken())) {
        return monoError(LOGGER, new NullPointerException("'message.getLockToken()' cannot be null."));
    } else if (message.getLockToken().isEmpty()) {
        return monoError(LOGGER, new IllegalArgumentException("'message.getLockToken()' cannot be empty."));
    } else if (receiverOptions.isSessionReceiver()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken())));
    } else if (maxLockRenewalDuration == null) {
        return monoError(LOGGER, new NullPointerException("'maxLockRenewalDuration' cannot be null."));
    } else if (maxLockRenewalDuration.isNegative()) {
        return monoError(LOGGER, new IllegalArgumentException("'maxLockRenewalDuration' cannot be negative."));
    }

    // The operation re-invokes renewMessageLock(message) each cycle until maxLockRenewalDuration elapses.
    final LockRenewalOperation operation = new LockRenewalOperation(message.getLockToken(),
        maxLockRenewalDuration, false, ignored -> renewMessageLock(message));

    // Track the operation so it is closed when it expires or the receiver is closed.
    renewalContainer.addOrUpdate(message.getLockToken(), OffsetDateTime.now().plus(maxLockRenewalDuration),
        operation);

    return tracer.traceMonoWithLink("ServiceBus.renewMessageLock", operation.getCompletionOperation(),
        message, message.getContext())
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK));
}

/**
 * Renews the session lock if this receiver is a session receiver.
 *
 * @return The next expiration time for the session lock.
 * @throws IllegalStateException if the receiver is a non-session receiver or if receiver is already disposed.
 * @throws ServiceBusException if the session lock cannot be renewed.
 */
public Mono<OffsetDateTime> renewSessionLock() {
    return renewSessionLock(receiverOptions.getSessionId());
}

/**
 * Starts the auto lock renewal for the session this receiver works for.
 *
 * @param maxLockRenewalDuration Maximum duration to keep renewing the session lock.
 *
 * @return A lock renewal operation for the message.
 *
 * @throws NullPointerException if {@code sessionId} or {@code maxLockRenewalDuration} is null.
 * @throws IllegalStateException if the receiver is a non-session receiver or the receiver is disposed.
 * @throws ServiceBusException if the session lock renewal operation cannot be started.
 * @throws IllegalArgumentException if {@code sessionId} is an empty string or {@code maxLockRenewalDuration} is
 *     negative.
 */
public Mono<Void> renewSessionLock(Duration maxLockRenewalDuration) {
    return this.renewSessionLock(receiverOptions.getSessionId(), maxLockRenewalDuration);
}

/**
 * Sets the state of the session this receiver works for.
 *
 * @param sessionState State to set on the session.
 *
 * @return A Mono that completes when the session is set.
 * @throws IllegalStateException if the receiver is a non-session receiver or receiver is already disposed.
 * @throws ServiceBusException if the session state cannot be set.
*/ public Mono<Void> setSessionState(byte[] sessionState) { return this.setSessionState(receiverOptions.getSessionId(), sessionState); } /** * Starts a new service side transaction. The {@link ServiceBusTransactionContext transaction context} should be * passed to all operations that needs to be in this transaction. * * <p><strong>Creating and using a transaction</strong></p> * <!-- src_embed com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * <pre> * & * & * & * Mono&lt;ServiceBusTransactionContext&gt; transactionContext = receiver.createTransaction& * .cache& * error -&gt; Duration.ZERO, * & * * transactionContext.flatMap& * & * Mono&lt;Void&gt; operations = Mono.when& * receiver.receiveDeferredMessage& * receiver.complete& * receiver.abandon& * * & * return operations.flatMap& * & * </pre> * <!-- end com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * * @return The {@link Mono} that finishes this operation on service bus resource. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if a transaction cannot be created. */ public Mono<ServiceBusTransactionContext> createTransaction() { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction"))); } return tracer.traceMono("ServiceBus.commitTransaction", connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.createTransaction()) .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId()))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Commits the transaction and all the operations associated with it. 
* * <p><strong>Creating and using a transaction</strong></p> * <!-- src_embed com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * <pre> * & * & * & * Mono&lt;ServiceBusTransactionContext&gt; transactionContext = receiver.createTransaction& * .cache& * error -&gt; Duration.ZERO, * & * * transactionContext.flatMap& * & * Mono&lt;Void&gt; operations = Mono.when& * receiver.receiveDeferredMessage& * receiver.complete& * receiver.abandon& * * & * return operations.flatMap& * & * </pre> * <!-- end com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * * @param transactionContext The transaction to be commit. * * @return The {@link Mono} that finishes this operation on service bus resource. * * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the transaction could not be committed. */ public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction"))); } if (Objects.isNull(transactionContext)) { return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return tracer.traceMono("ServiceBus.commitTransaction", connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction( transactionContext.getTransactionId())))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Rollbacks the transaction given and all operations associated with it. 
* * <p><strong>Creating and using a transaction</strong></p> * <!-- src_embed com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * <pre> * & * & * & * Mono&lt;ServiceBusTransactionContext&gt; transactionContext = receiver.createTransaction& * .cache& * error -&gt; Duration.ZERO, * & * * transactionContext.flatMap& * & * Mono&lt;Void&gt; operations = Mono.when& * receiver.receiveDeferredMessage& * receiver.complete& * receiver.abandon& * * & * return operations.flatMap& * & * </pre> * <!-- end com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * * @param transactionContext The transaction to rollback. * * @return The {@link Mono} that finishes this operation on service bus resource. * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the transaction could not be rolled back. */ public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction"))); } if (Objects.isNull(transactionContext)) { return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return tracer.traceMono("ServiceBus.rollbackTransaction", connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction( transactionContext.getTransactionId())))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Disposes of the consumer by closing the underlying links to the service. 
*/ @Override public void close() { if (isDisposed.get()) { return; } try { boolean acquired = completionLock.tryAcquire(5, TimeUnit.SECONDS); if (!acquired) { LOGGER.info("Unable to obtain completion lock."); } } catch (InterruptedException e) { LOGGER.info("Unable to obtain completion lock.", e); } if (isDisposed.getAndSet(true)) { return; } LOGGER.info("Removing receiver links."); final ServiceBusAsyncConsumer disposed = consumer.getAndSet(null); if (disposed != null) { disposed.close(); } if (sessionManager != null) { sessionManager.close(); } managementNodeLocks.close(); renewalContainer.close(); if (trackSettlementSequenceNumber != null) { try { trackSettlementSequenceNumber.close(); } catch (Exception e) { LOGGER.info("Unable to close settlement sequence number subscription.", e); } } onClientClose.run(); } /** * @return receiver options set by user; */ ReceiverOptions getReceiverOptions() { return receiverOptions; } /** * Gets whether or not the management node contains the message lock token and it has not expired. Lock tokens are * held by the management node when they are received from the management node or management operations are * performed using that {@code lockToken}. * * @param lockToken Lock token to check for. * * @return {@code true} if the management node contains the lock token and false otherwise. 
*/ private boolean isManagementToken(String lockToken) { return managementNodeLocks.containsUnexpired(lockToken); } private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus, String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue()))); } else if (Objects.isNull(message)) { return monoError(LOGGER, new NullPointerException("'message' cannot be null.")); } final String lockToken = message.getLockToken(); final String sessionId = message.getSessionId(); if (receiverOptions.getReceiveMode() != ServiceBusReceiveMode.PEEK_LOCK) { return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format( "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus)))); } else if (message.isSettled()) { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("The message has either been deleted or already settled."))); } else if (message.getLockToken() == null) { final String errorMessage = "This operation is not supported for peeked messages. 
" + "Only messages received using receiveMessages() in PEEK_LOCK mode can be settled."; return Mono.error( LOGGER.logExceptionAsError(new UnsupportedOperationException(errorMessage)) ); } final String sessionIdToUse; if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) { sessionIdToUse = receiverOptions.getSessionId(); } else { sessionIdToUse = sessionId; } LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, lockToken) .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(SESSION_ID_KEY, sessionIdToUse) .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus) .log("Update started."); final Mono<Void> performOnManagement = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId), transactionContext)) .then(Mono.fromRunnable(() -> { LOGGER.atInfo() .addKeyValue(LOCK_TOKEN_KEY, lockToken) .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus) .log("Management node Update completed."); message.setIsSettled(); managementNodeLocks.remove(lockToken); renewalContainer.remove(lockToken); })); Mono<Void> updateDispositionOperation; if (sessionManager != null) { updateDispositionOperation = sessionManager.updateDisposition(lockToken, sessionId, dispositionStatus, propertiesToModify, deadLetterReason, deadLetterErrorDescription, transactionContext) .flatMap(isSuccess -> { if (isSuccess) { message.setIsSettled(); renewalContainer.remove(lockToken); return Mono.empty(); } LOGGER.info("Could not perform on session manger. 
Performing on management node."); return performOnManagement; }); } else { final ServiceBusAsyncConsumer existingConsumer = consumer.get(); if (isManagementToken(lockToken) || existingConsumer == null) { updateDispositionOperation = performOnManagement; } else { updateDispositionOperation = existingConsumer.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, transactionContext) .then(Mono.fromRunnable(() -> { LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, lockToken) .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus) .log("Update completed."); message.setIsSettled(); renewalContainer.remove(lockToken); })); } } return instrumentation.instrumentSettlement(updateDispositionOperation, message, message.getContext(), dispositionStatus) .onErrorMap(throwable -> { if (throwable instanceof ServiceBusException) { return throwable; } switch (dispositionStatus) { case COMPLETED: return new ServiceBusException(throwable, ServiceBusErrorSource.COMPLETE); case ABANDONED: return new ServiceBusException(throwable, ServiceBusErrorSource.ABANDON); default: return new ServiceBusException(throwable, ServiceBusErrorSource.UNKNOWN); } }); } /** * If the receiver has not connected via {@link * through the management node. * * @return The name of the receive link, or null of it has not connected via a receive link. */ private String getLinkName(String sessionId) { if (sessionManager != null && !CoreUtils.isNullOrEmpty(sessionId)) { return sessionManager.getLinkName(sessionId); } else if (!CoreUtils.isNullOrEmpty(sessionId) && !receiverOptions.isSessionReceiver()) { return null; } else { final ServiceBusAsyncConsumer existing = consumer.get(); return existing != null ? 
existing.getLinkName() : null; } } Mono<OffsetDateTime> renewSessionLock(String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot renew session lock on a non-session receiver.")); } final String linkName = sessionManager != null ? sessionManager.getLinkName(sessionId) : null; return tracer.traceMono("ServiceBus.renewSessionLock", connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.renewSessionLock(sessionId, linkName))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> renewSessionLock(String sessionId, Duration maxLockRenewalDuration) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException( "Cannot renew session lock on a non-session receiver.")); } else if (maxLockRenewalDuration == null) { return monoError(LOGGER, new NullPointerException("'maxLockRenewalDuration' cannot be null.")); } else if (maxLockRenewalDuration.isNegative()) { return monoError(LOGGER, new IllegalArgumentException( "'maxLockRenewalDuration' cannot be negative.")); } else if (Objects.isNull(sessionId)) { return monoError(LOGGER, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(LOGGER, new IllegalArgumentException("'sessionId' cannot be empty.")); } final LockRenewalOperation operation = new LockRenewalOperation(sessionId, maxLockRenewalDuration, true, this::renewSessionLock); renewalContainer.addOrUpdate(sessionId, OffsetDateTime.now().plus(maxLockRenewalDuration), operation); return 
tracer.traceMono("ServiceBus.renewSessionLock", operation.getCompletionOperation()) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> setSessionState(String sessionId, byte[] sessionState) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot set session state on a non-session receiver.")); } final String linkName = sessionManager != null ? sessionManager.getLinkName(sessionId) : null; return tracer.traceMono("ServiceBus.setSessionState", connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.setSessionState(sessionId, sessionState, linkName))) .onErrorMap((err) -> mapError(err, ServiceBusErrorSource.RECEIVE)); } Mono<byte[]> getSessionState(String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot get session state on a non-session receiver.")); } Mono<byte[]> result; if (sessionManager != null) { result = sessionManager.getSessionState(sessionId); } else { result = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.getSessionState(sessionId, getLinkName(sessionId))); } return tracer.traceMono("ServiceBus.setSessionState", result) .onErrorMap((err) -> mapError(err, ServiceBusErrorSource.RECEIVE)); } ServiceBusReceiverInstrumentation getInstrumentation() { return instrumentation; } /** * Map the error to {@link ServiceBusException} */ private Throwable mapError(Throwable throwable, ServiceBusErrorSource errorSource) { if (!(throwable instanceof 
ServiceBusException)) { return new ServiceBusException(throwable, errorSource); } return throwable; } boolean isConnectionClosed() { return this.connectionProcessor.isChannelClosed(); } boolean isManagementNodeLocksClosed() { return this.managementNodeLocks.isClosed(); } boolean isRenewalContainerClosed() { return this.renewalContainer.isClosed(); } }
class ServiceBusReceiverAsyncClient implements AutoCloseable { private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions(); private static final String TRANSACTION_LINK_NAME = "coordinator"; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusReceiverAsyncClient.class); private final LockContainer<LockRenewalOperation> renewalContainer; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final LockContainer<OffsetDateTime> managementNodeLocks; private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final ServiceBusReceiverInstrumentation instrumentation; private final ServiceBusTracer tracer; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager sessionManager; private final Semaphore completionLock = new Semaphore(1); private final String identifier; private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1); private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>(); private final AutoCloseable trackSettlementSequenceNumber; /** * Creates a receiver that listens to a Service Bus resource. * * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource. * @param entityPath The name of the topic or queue. * @param entityType The type of the Service Bus resource. * @param receiverOptions Options when receiving messages. * @param connectionProcessor The AMQP connection to the Service Bus resource. * @param instrumentation ServiceBus tracing and metrics helper * @param messageSerializer Serializes and deserializes Service Bus messages. * @param onClientClose Operation to run when the client completes. 
 */
ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
    ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
    ServiceBusReceiverInstrumentation instrumentation, MessageSerializer messageSerializer,
    Runnable onClientClose, String identifier) {
    this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
        "'fullyQualifiedNamespace' cannot be null.");
    this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
    this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
    this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
    this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
        "'connectionProcessor' cannot be null.");
    this.instrumentation = Objects.requireNonNull(instrumentation, "'tracer' cannot be null");
    this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
    this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");

    this.managementNodeLocks = new LockContainer<>(cleanupInterval);
    // Expired renewal operations are closed as they are evicted from the container.
    this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> {
        LOGGER.atVerbose()
            .addKeyValue(LOCK_TOKEN_KEY, renewal.getLockToken())
            .addKeyValue("status", renewal.getStatus())
            .log("Closing expired renewal operation.", renewal.getThrowable());
        renewal.close();
    });

    // This constructor is the non-session variant; session receivers use the overload below.
    this.sessionManager = null;
    this.identifier = identifier;
    this.tracer = instrumentation.getTracer();
    this.trackSettlementSequenceNumber = instrumentation.startTrackingSettlementSequenceNumber();
}

ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
    ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
    ServiceBusReceiverInstrumentation instrumentation, MessageSerializer messageSerializer,
    Runnable onClientClose, ServiceBusSessionManager sessionManager) {
    this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
        "'fullyQualifiedNamespace' cannot be null.");
    this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
    this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
    this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
    this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
        "'connectionProcessor' cannot be null.");
    this.instrumentation = Objects.requireNonNull(instrumentation, "'tracer' cannot be null");
    this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
    this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
    this.sessionManager = Objects.requireNonNull(sessionManager, "'sessionManager' cannot be null.");

    this.managementNodeLocks = new LockContainer<>(cleanupInterval);
    this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> {
        LOGGER.atInfo()
            .addKeyValue(SESSION_ID_KEY, renewal.getSessionId())
            .addKeyValue("status", renewal.getStatus())
            .log("Closing expired renewal operation.", renewal.getThrowable());
        renewal.close();
    });

    this.identifier = sessionManager.getIdentifier();
    this.tracer = instrumentation.getTracer();
    this.trackSettlementSequenceNumber = instrumentation.startTrackingSettlementSequenceNumber();
}

/**
 * Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to
 * {@code {yournamespace}.servicebus.windows.net}.
 *
 * @return The fully qualified Service Bus namespace that the connection is associated with.
 */
public String getFullyQualifiedNamespace() {
    return fullyQualifiedNamespace;
}

/**
 * Gets the Service Bus resource this client interacts with.
* * @return The Service Bus resource this client interacts with. */ public String getEntityPath() { return entityPath; } /** * Gets the SessionId of the session if this receiver is a session receiver. * * @return The SessionId or null if this is not a session receiver. */ public String getSessionId() { return receiverOptions.getSessionId(); } /** * Gets the identifier of the instance of {@link ServiceBusReceiverAsyncClient}. * * @return The identifier that can identify the instance of {@link ServiceBusReceiverAsyncClient}. */ public String getIdentifier() { return identifier; } /** * Abandons a {@link ServiceBusReceivedMessage message}. This will make the message available again for processing. * Abandoning a message will increase the delivery count on the message. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that completes when the Service Bus abandon operation completes. * * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * {@link ServiceBusReceiverAsyncClient * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the message could not be abandoned. * @throws IllegalArgumentException if the message has either been deleted or already settled. */ public Mono<Void> abandon(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.ABANDONED, null, null, null, null); } /** * Abandons a {@link ServiceBusReceivedMessage message} updates the message's properties. This will make the * message available again for processing. Abandoning a message will increase the delivery count on the message. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options The options to set while abandoning the message. * * @return A {@link Mono} that completes when the Service Bus operation finishes. 
* * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. * @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * {@link ServiceBusReceiverAsyncClient * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the message could not be abandoned. * @throws IllegalArgumentException if the message has either been deleted or already settled. */ public Mono<Void> abandon(ServiceBusReceivedMessage message, AbandonOptions options) { if (Objects.isNull(options)) { return monoError(LOGGER, new NullPointerException("'settlementOptions' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(LOGGER, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.ABANDONED, null, null, options.getPropertiesToModify(), options.getTransactionContext()); } /** * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the service. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that finishes when the message is completed on Service Bus. * * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * {@link ServiceBusReceiverAsyncClient * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the message could not be completed. * @throws IllegalArgumentException if the message has either been deleted or already settled. 
     */
    public Mono<Void> complete(ServiceBusReceivedMessage message) {
        return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, null);
    }

    /**
     * Completes a {@link ServiceBusReceivedMessage message} with the given options. This will delete the message
     * from the service.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param options Options used to complete the message.
     *
     * @return A {@link Mono} that finishes when the message is completed on Service Bus.
     *
     * @throws NullPointerException if {@code message} or {@code options} is null. Also if
     *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
     * @throws UnsupportedOperationException if the receiver was opened in
     *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be completed.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
     */
    public Mono<Void> complete(ServiceBusReceivedMessage message, CompleteOptions options) {
        if (Objects.isNull(options)) {
            return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
        } else if (!Objects.isNull(options.getTransactionContext())
            && Objects.isNull(options.getTransactionContext().getTransactionId())) {
            return monoError(LOGGER, new NullPointerException(
                "'options.transactionContext.transactionId' cannot be null."));
        }

        // Note: unlike abandon/defer, completion carries no properties-to-modify; only the transaction
        // context from the options is forwarded.
        return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null,
            options.getTransactionContext());
    }

    /**
     * Defers a {@link ServiceBusReceivedMessage message}. This will move message into the deferred sub-queue.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     *
     * @return A {@link Mono} that completes when the Service Bus defer operation finishes.
     *
     * @throws NullPointerException if {@code message} is null.
     * @throws UnsupportedOperationException if the receiver was opened in
     *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be deferred.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-deferral">Message deferral</a>
     */
    public Mono<Void> defer(ServiceBusReceivedMessage message) {
        return updateDisposition(message, DispositionStatus.DEFERRED, null, null, null, null);
    }

    /**
     * Defers a {@link ServiceBusReceivedMessage message} with the options set. This will move message into
     * the deferred sub-queue.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param options Options used to defer the message.
     *
     * @return A {@link Mono} that completes when the defer operation finishes.
     *
     * @throws NullPointerException if {@code message} or {@code options} is null. Also if
     *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
     * @throws UnsupportedOperationException if the receiver was opened in
     *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be deferred.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-deferral">Message deferral</a>
     */
    public Mono<Void> defer(ServiceBusReceivedMessage message, DeferOptions options) {
        if (Objects.isNull(options)) {
            return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
        } else if (!Objects.isNull(options.getTransactionContext())
            && Objects.isNull(options.getTransactionContext().getTransactionId())) {
            return monoError(LOGGER, new NullPointerException(
                "'options.transactionContext.transactionId' cannot be null."));
        }

        return updateDisposition(message, DispositionStatus.DEFERRED, null, null, options.getPropertiesToModify(),
            options.getTransactionContext());
    }

    /**
     * Moves a {@link ServiceBusReceivedMessage message} to the dead-letter sub-queue.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     *
     * @return A {@link Mono} that completes when the dead letter operation finishes.
     *
     * @throws NullPointerException if {@code message} is null.
     * @throws UnsupportedOperationException if the receiver was opened in
     *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be dead-lettered.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
     *
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-dead-letter-queues">Dead letter
     *     queues</a>
     */
    public Mono<Void> deadLetter(ServiceBusReceivedMessage message) {
        // Delegates to the options overload with the shared default dead-letter options (no reason/description).
        return deadLetter(message, DEFAULT_DEAD_LETTER_OPTIONS);
    }

    /**
     * Moves a {@link ServiceBusReceivedMessage message} to the dead-letter sub-queue with the given options.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param options Options used to dead-letter the message.
     *
     * @return A {@link Mono} that completes when the dead letter operation finishes.
     *
     * @throws NullPointerException if {@code message} or {@code options} is null. Also if
     *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
     * @throws UnsupportedOperationException if the receiver was opened in
     *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be dead-lettered.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
     *
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-dead-letter-queues">Dead letter
     *     queues</a>
     */
    public Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions options) {
        if (Objects.isNull(options)) {
            return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
        } else if (!Objects.isNull(options.getTransactionContext())
            && Objects.isNull(options.getTransactionContext().getTransactionId())) {
            return monoError(LOGGER, new NullPointerException(
                "'options.transactionContext.transactionId' cannot be null."));
        }

        // Dead-lettering is modeled as the SUSPENDED disposition on the AMQP management node.
        return updateDisposition(message, DispositionStatus.SUSPENDED, options.getDeadLetterReason(),
            options.getDeadLetterErrorDescription(), options.getPropertiesToModify(),
            options.getTransactionContext());
    }

    /**
     * Gets the state of the session if this receiver is a session receiver.
     *
     * @return The session state or an empty Mono if there is no state set for the session.
     * @throws IllegalStateException if the receiver is a non-session receiver or receiver is already closed.
     * @throws ServiceBusException if the session state could not be acquired.
     */
    public Mono<byte[]> getSessionState() {
        return getSessionState(receiverOptions.getSessionId());
    }

    /**
     * Reads the next active message without changing the state of the receiver or the message source. The first call
     * to {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the
     * subsequent message in the entity.
     *
     * @return A peeked {@link ServiceBusReceivedMessage}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at the message.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Mono<ServiceBusReceivedMessage> peekMessage() {
        return peekMessage(receiverOptions.getSessionId());
    }

    /**
     * Reads the next active message without changing the state of the receiver or the message source. The first call
     * to {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the
     * subsequent message in the entity.
     *
     * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
     *
     * @return A peeked {@link ServiceBusReceivedMessage}.
     * @throws IllegalStateException if the receiver is disposed.
     * @throws ServiceBusException if an error occurs while peeking at the message.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    Mono<ServiceBusReceivedMessage> peekMessage(String sessionId) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek")));
        }

        Mono<ServiceBusReceivedMessage> result = connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(channel -> {
                // Peek starts just after the highest sequence number seen by any previous peek on this client.
                final long sequence = lastPeekedSequenceNumber.get() + 1;

                LOGGER.atVerbose()
                    .addKeyValue(SEQUENCE_NUMBER_KEY, sequence)
                    .log("Peek message.");

                return channel.peek(sequence, sessionId, getLinkName(sessionId));
            })
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE))
            .handle((message, sink) -> {
                // Advance the high-water mark monotonically so concurrent peeks never move it backwards.
                final long current = lastPeekedSequenceNumber
                    .updateAndGet(value -> Math.max(value, message.getSequenceNumber()));

                LOGGER.atVerbose()
                    .addKeyValue(SEQUENCE_NUMBER_KEY, current)
                    .log("Updating last peeked sequence number.");

                sink.next(message);
            });

        return tracer.traceManagementReceive("ServiceBus.peekMessage", result,
            ServiceBusReceivedMessage::getContext);
    }

    /**
     * Starting from the given sequence number, reads next the active message without changing the state of the
     * receiver or the message source.
     *
     * @param sequenceNumber The sequence number from where to read the message.
     *
     * @return A peeked {@link ServiceBusReceivedMessage}.
     *
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at the message.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Mono<ServiceBusReceivedMessage> peekMessage(long sequenceNumber) {
        return peekMessage(sequenceNumber, receiverOptions.getSessionId());
    }

    /**
     * Starting from the given sequence number, reads next the active message without changing the state of the
     * receiver or the message source.
     *
     * @param sequenceNumber The sequence number from where to read the message.
     * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
     *
     * @return A peeked {@link ServiceBusReceivedMessage}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at the message.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    Mono<ServiceBusReceivedMessage> peekMessage(long sequenceNumber, String sessionId) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt")));
        }
        // Explicit sequence number: does not consult or update lastPeekedSequenceNumber.
        return tracer.traceManagementReceive("ServiceBus.peekMessage",
            connectionProcessor
                .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
                .flatMap(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId)))
                .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)),
            ServiceBusReceivedMessage::getContext);
    }

    /**
     * Reads the next batch of active messages without changing the state of the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     *
     * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked.
     *
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at messages.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages) {
        return tracer.traceSyncReceive("ServiceBus.peekMessages",
            peekMessages(maxMessages, receiverOptions.getSessionId()));
    }

    /**
     * Reads the next batch of active messages without changing the state of the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
     *
     * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage messages} that are peeked.
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at messages.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, String sessionId) {
        if (isDisposed.get()) {
            return fluxError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch")));
        }
        if (maxMessages <= 0) {
            return fluxError(LOGGER, new IllegalArgumentException("'maxMessages' is not positive."));
        }

        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMapMany(node -> {
                final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1;
                LOGGER.atVerbose().addKeyValue(SEQUENCE_NUMBER_KEY, nextSequenceNumber).log("Peek batch.");

                final Flux<ServiceBusReceivedMessage> messages = node.peek(nextSequenceNumber, sessionId,
                    getLinkName(sessionId), maxMessages);

                // Side-channel that observes the last peeked message (or a sentinel when the batch is empty)
                // solely to advance lastPeekedSequenceNumber; it completes without emitting anything.
                final Mono<ServiceBusReceivedMessage> handle = messages
                    .switchIfEmpty(Mono.fromCallable(() -> {
                        ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(BinaryData
                            .fromBytes(new byte[0]));
                        emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get());
                        return emptyMessage;
                    }))
                    .last()
                    .handle((last, sink) -> {
                        final long current = lastPeekedSequenceNumber
                            .updateAndGet(value -> Math.max(value,
                                last.getSequenceNumber()));

                        LOGGER.atVerbose().addKeyValue(SEQUENCE_NUMBER_KEY, current).log("Last peeked sequence number in batch.");
                        sink.complete();
                    });

                return Flux.merge(messages, handle);
            })
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
    }

    /**
     * Starting from the given sequence number, reads the next batch of active messages without changing the state of
     * the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     * @param sequenceNumber The sequence number from where to start reading messages.
     *
     * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} peeked.
     *
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at messages.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, long sequenceNumber) {
        return peekMessages(maxMessages, sequenceNumber, receiverOptions.getSessionId());
    }

    /**
     * Starting from the given sequence number, reads the next batch of active messages without changing the state of
     * the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     * @param sequenceNumber The sequence number from where to start reading messages.
     * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
     *
     * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage} peeked.
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at messages.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, long sequenceNumber, String sessionId) {
        if (isDisposed.get()) {
            return fluxError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt")));
        }
        if (maxMessages <= 0) {
            return fluxError(LOGGER, new IllegalArgumentException("'maxMessages' is not positive."));
        }

        // Explicit starting sequence number: does not consult or update lastPeekedSequenceNumber.
        return tracer.traceSyncReceive("ServiceBus.peekMessages",
            connectionProcessor
                .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
                .flatMapMany(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages))
                .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)));
    }

    /**
     * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity.
     * This Flux continuously receives messages from a Service Bus entity until either:
     *
     * <ul>
     * <li>The receiver is closed.</li>
     * <li>The subscription to the Flux is disposed.</li>
     * <li>A terminal signal from a downstream subscriber is propagated upstream.</li>
     * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li>
     * </ul>
     *
     * @return An <b>infinite</b> stream of messages from the Service Bus entity.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while receiving messages.
     */
    public Flux<ServiceBusReceivedMessage> receiveMessages() {
        if (isDisposed.get()) {
            return fluxError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveMessages")));
        }
        // limitRate(1, 0): request one message upstream at a time so prefetch is controlled by the link,
        // not by Reactor's default 256-element prefetch.
        return receiveMessagesNoBackPressure().limitRate(1, 0);
    }

    // Unwraps ServiceBusMessageContext into messages/errors while keeping the tracing span current for the
    // duration of each downstream emission. Package-private: used by receiveMessages() and the processor client.
    @SuppressWarnings("try")
    Flux<ServiceBusReceivedMessage> receiveMessagesNoBackPressure() {
        return receiveMessagesWithContext(0)
            .handle((serviceBusMessageContext, sink) -> {
                try (AutoCloseable scope  = tracer.makeSpanCurrent(serviceBusMessageContext.getMessage().getContext())) {
                    if (serviceBusMessageContext.hasError()) {
                        sink.error(serviceBusMessageContext.getThrowable());
                        return;
                    }
                    sink.next(serviceBusMessageContext.getMessage());
                } catch (Exception ex) {
                    // Failure closing the tracing scope must not fail the message stream.
                    LOGGER.verbose("Error disposing scope", ex);
                }
            });
    }

    /**
     * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity.
     * This Flux continuously receives messages from a Service Bus entity until either:
     *
     * <ul>
     * <li>The receiver is closed.</li>
     * <li>The subscription to the Flux is disposed.</li>
     * <li>A terminal signal from a downstream subscriber is propagated upstream.</li>
     * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li>
     * </ul>
     *
     * @return An <b>infinite</b> stream of messages from the Service Bus entity.
     */
    Flux<ServiceBusMessageContext> receiveMessagesWithContext() {
        return receiveMessagesWithContext(1);
    }

    // Builds the receive pipeline. Assembly order matters: tracing wraps the raw flux, then (optionally)
    // auto lock renewal, then (optionally) auto-complete/abandon, then rate limiting, then error mapping.
    Flux<ServiceBusMessageContext> receiveMessagesWithContext(int highTide) {
        // Session receivers pull from the session manager; non-session receivers from the shared consumer.
        final Flux<ServiceBusMessageContext> messageFlux = sessionManager != null
            ? sessionManager.receive()
            : getOrCreateConsumer().receive().map(ServiceBusMessageContext::new);

        final Flux<ServiceBusMessageContext> messageFluxWithTracing = new FluxTrace(messageFlux, instrumentation);
        final Flux<ServiceBusMessageContext> withAutoLockRenewal;

        // Auto lock renewal at this layer applies only to non-session receivers; session renewal is
        // handled by the session manager.
        if (!receiverOptions.isSessionReceiver() && receiverOptions.isAutoLockRenewEnabled()) {
            withAutoLockRenewal = new FluxAutoLockRenew(messageFluxWithTracing, receiverOptions,
                renewalContainer, this::renewMessageLock);
        } else {
            withAutoLockRenewal = messageFluxWithTracing;
        }

        Flux<ServiceBusMessageContext> result;
        if (receiverOptions.isEnableAutoComplete()) {
            result = new FluxAutoComplete(withAutoLockRenewal, completionLock,
                context -> context.getMessage() != null ? complete(context.getMessage()) : Mono.empty(),
                context -> context.getMessage() != null ? abandon(context.getMessage()) : Mono.empty());
        } else {
            result = withAutoLockRenewal;
        }

        // highTide <= 0 means "no rate limiting" (caller applies its own, e.g. receiveMessages()).
        if (highTide > 0) {
            result = result.limitRate(highTide, 0);
        }

        return result
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
    }

    /**
     * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
     * sequence number.
     *
     * @param sequenceNumber The sequence number of the deferred message.
     *
     * @return A deferred message with the matching {@code sequenceNumber}.
     *
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if deferred message cannot be received.
     */
    public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) {
        return receiveDeferredMessage(sequenceNumber, receiverOptions.getSessionId());
    }

    /**
     * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
     * sequence number.
     *
     * @param sequenceNumber The sequence number of the deferred message.
     * @param sessionId Session id of the deferred message. {@code null} if there is no session.
     *
     * @return A deferred message with the matching {@code sequenceNumber}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if deferred message cannot be received.
     */
    Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessage")));
        }
        return tracer.traceManagementReceive("ServiceBus.receiveDeferredMessage",
            connectionProcessor
                .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
                .flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId,
                    getLinkName(sessionId), Collections.singleton(sequenceNumber)).last())
                .map(receivedMessage -> {
                    if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                        return receivedMessage;
                    }
                    // In PEEK_LOCK mode the lock was issued by the management node, so track it in the
                    // management-node lock container for later renewal/settlement.
                    if (receiverOptions.getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) {
                        receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                            receivedMessage.getLockedUntil(),
                            receivedMessage.getLockedUntil()));
                    }

                    return receivedMessage;
                })
                .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)),
            ServiceBusReceivedMessage::getContext);
    }

    /**
     * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
     * by using sequence number.
     *
     * @param sequenceNumbers The sequence numbers of the deferred messages.
     *
     * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}.
     *
     * @throws NullPointerException if {@code sequenceNumbers} is null.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if deferred messages cannot be received.
     */
    public Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers) {
        return tracer.traceSyncReceive("ServiceBus.receiveDeferredMessages",
            receiveDeferredMessages(sequenceNumbers, receiverOptions.getSessionId()));
    }

    /**
     * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
     * by using sequence number.
     *
     * @param sequenceNumbers The sequence numbers of the deferred messages.
     * @param sessionId Session id of the deferred messages. {@code null} if there is no session.
     *
     * @return An {@link IterableStream} of deferred {@link ServiceBusReceivedMessage messages}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws NullPointerException if {@code sequenceNumbers} is null.
     * @throws ServiceBusException if deferred message cannot be received.
     */
    Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers, String sessionId) {
        if (isDisposed.get()) {
            return fluxError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch")));
        }
        if (sequenceNumbers == null) {
            return fluxError(LOGGER, new NullPointerException("'sequenceNumbers' cannot be null"));
        }
        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(),
                sessionId, getLinkName(sessionId), sequenceNumbers))
            .map(receivedMessage -> {
                if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                    return receivedMessage;
                }
                // Management-node-issued locks are tracked locally for later renewal/settlement.
                if (receiverOptions.getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) {
                    receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                        receivedMessage.getLockedUntil(),
                        receivedMessage.getLockedUntil()));
                }

                return receivedMessage;
            })
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
    }

    /**
     * Package-private method that releases a message.
     *
     * @param message Message to release.
     * @return Mono that completes when message is successfully released.
     */
    Mono<Void> release(ServiceBusReceivedMessage message) {
        return updateDisposition(message, DispositionStatus.RELEASED, null, null, null, null);
    }

    /**
     * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the
     * entity. When a message is received in {@link ServiceBusReceiveMode#PEEK_LOCK} mode, the message is locked on the
     * server for this receiver instance for a duration as specified during the entity creation (LockDuration). If
     * processing of the message requires longer than this duration, the lock needs to be renewed. For each renewal,
     * the lock is reset to the entity's LockDuration value.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform auto-lock renewal.
     *
     * @return The new expiration time for the message.
     *
     * @throws NullPointerException if {@code message} or {@code message.getLockToken()} is null.
     * @throws UnsupportedOperationException if the receiver was opened in
     *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode.
     * @throws IllegalStateException if the receiver is a session receiver or receiver is already disposed.
     * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value.
     */
    public Mono<OffsetDateTime> renewMessageLock(ServiceBusReceivedMessage message) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock")));
        } else if (Objects.isNull(message)) {
            return monoError(LOGGER, new NullPointerException("'message' cannot be null."));
        } else if (Objects.isNull(message.getLockToken())) {
            return monoError(LOGGER, new NullPointerException("'message.getLockToken()' cannot be null."));
        } else if (message.getLockToken().isEmpty()) {
            return monoError(LOGGER, new IllegalArgumentException("'message.getLockToken()' cannot be empty."));
        } else if (receiverOptions.isSessionReceiver()) {
            // Session receivers renew the session lock, not individual message locks.
            final String errorMessage = "Renewing message lock is an invalid operation when working with sessions.";
            return monoError(LOGGER, new IllegalStateException(errorMessage));
        }

        return tracer.traceMonoWithLink("ServiceBus.renewMessageLock", renewMessageLock(message.getLockToken()),
                message, message.getContext())
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK));
    }

    /**
     * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the
     * entity.
     *
     * @param lockToken to be renewed.
     *
     * @return The new expiration time for the message.
     * @throws IllegalStateException if receiver is already disposed.
     */
    Mono<OffsetDateTime> renewMessageLock(String lockToken) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock")));
        }
        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(serviceBusManagementNode ->
                serviceBusManagementNode.renewMessageLock(lockToken, getLinkName(null)))
            // Record the refreshed expiry in the lock container so settlement paths see the new time.
            .map(offsetDateTime -> managementNodeLocks.addOrUpdate(lockToken, offsetDateTime,
                offsetDateTime));
    }

    /**
     * Starts the auto lock renewal for a {@link ServiceBusReceivedMessage message}.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param maxLockRenewalDuration Maximum duration to keep renewing the lock token.
     *
     * @return A Mono that completes when the message renewal operation has completed up until
     *     {@code maxLockRenewalDuration}.
     *
     * @throws NullPointerException if {@code message}, {@code message.getLockToken()}, or
     *     {@code maxLockRenewalDuration} is null.
     * @throws IllegalStateException if the receiver is a session receiver or the receiver is disposed.
     * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value.
     * @throws ServiceBusException If the message lock cannot be renewed.
     */
    public Mono<Void> renewMessageLock(ServiceBusReceivedMessage message, Duration maxLockRenewalDuration) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewMessageLock")));
        } else if (Objects.isNull(message)) {
            return monoError(LOGGER, new NullPointerException("'message' cannot be null."));
        } else if (Objects.isNull(message.getLockToken())) {
            return monoError(LOGGER, new NullPointerException("'message.getLockToken()' cannot be null."));
        } else if (message.getLockToken().isEmpty()) {
            return monoError(LOGGER, new IllegalArgumentException("'message.getLockToken()' cannot be empty."));
        } else if (receiverOptions.isSessionReceiver()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken())));
        } else if (maxLockRenewalDuration == null) {
            return monoError(LOGGER, new NullPointerException("'maxLockRenewalDuration' cannot be null."));
        } else if (maxLockRenewalDuration.isNegative()) {
            return monoError(LOGGER, new IllegalArgumentException("'maxLockRenewalDuration' cannot be negative."));
        }

        // The operation repeatedly calls renewMessageLock(message) until maxLockRenewalDuration elapses;
        // it is registered in renewalContainer so close() can cancel outstanding renewals.
        final LockRenewalOperation operation = new LockRenewalOperation(message.getLockToken(),
            maxLockRenewalDuration, false, ignored -> renewMessageLock(message));
        renewalContainer.addOrUpdate(message.getLockToken(), OffsetDateTime.now().plus(maxLockRenewalDuration),
            operation);

        return tracer.traceMonoWithLink("ServiceBus.renewMessageLock", operation.getCompletionOperation(),
                message, message.getContext())
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK));
    }

    /**
     * Renews the session lock if this receiver is a session receiver.
     *
     * @return The next expiration time for the session lock.
     * @throws IllegalStateException if the receiver is a non-session receiver or if receiver is already disposed.
     * @throws ServiceBusException if the session lock cannot be renewed.
     */
    public Mono<OffsetDateTime> renewSessionLock() {
        return renewSessionLock(receiverOptions.getSessionId());
    }

    /**
     * Starts the auto lock renewal for the session this receiver works for.
     *
     * @param maxLockRenewalDuration Maximum duration to keep renewing the session lock.
     *
     * @return A lock renewal operation for the message.
     *
     * @throws NullPointerException if {@code sessionId} or {@code maxLockRenewalDuration} is null.
     * @throws IllegalStateException if the receiver is a non-session receiver or the receiver is disposed.
     * @throws ServiceBusException if the session lock renewal operation cannot be started.
     * @throws IllegalArgumentException if {@code sessionId} is an empty string or {@code maxLockRenewalDuration} is
     *     negative.
     */
    public Mono<Void> renewSessionLock(Duration maxLockRenewalDuration) {
        return this.renewSessionLock(receiverOptions.getSessionId(), maxLockRenewalDuration);
    }

    /**
     * Sets the state of the session this receiver works for.
     *
     * @param sessionState State to set on the session.
     *
     * @return A Mono that completes when the session is set
     * @throws IllegalStateException if the receiver is a non-session receiver or receiver is already disposed.
     * @throws ServiceBusException if the session state cannot be set.
     */
    public Mono<Void> setSessionState(byte[] sessionState) {
        // Delegates to the session-aware overload using the session id this receiver was configured with.
        return this.setSessionState(receiverOptions.getSessionId(), sessionState);
    }

    /**
     * Starts a new service side transaction. The {@link ServiceBusTransactionContext transaction context} should be
     * passed to all operations that need to be in this transaction.
     *
     * @return The {@link Mono} that finishes this operation on the Service Bus resource.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if a transaction cannot be created.
     */
    public Mono<ServiceBusTransactionContext> createTransaction() {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction")));
        }

        // NOTE(review): the trace span is named "ServiceBus.commitTransaction" although this method *creates* a
        // transaction — presumably it should be "ServiceBus.createTransaction"; confirm before changing.
        return tracer.traceMono("ServiceBus.commitTransaction", connectionProcessor
            .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
            .flatMap(transactionSession -> transactionSession.createTransaction())
            .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId())))
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
    }

    /**
     * Commits the transaction and all the operations associated with it.
     *
     * @param transactionContext The transaction to commit.
     *
     * @return The {@link Mono} that finishes this operation on the Service Bus resource.
     *
     * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the transaction could not be committed.
     */
    public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction")));
        }
        if (Objects.isNull(transactionContext)) {
            return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
        } else if (Objects.isNull(transactionContext.getTransactionId())) {
            return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
        }

        // The commit is issued over a dedicated AMQP session named by TRANSACTION_LINK_NAME ("coordinator").
        return tracer.traceMono("ServiceBus.commitTransaction", connectionProcessor
            .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
            .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction(
                transactionContext.getTransactionId()))))
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
    }

    /**
     * Rollbacks the transaction given and all operations associated with it.
     *
     * @param transactionContext The transaction to rollback.
     *
     * @return The {@link Mono} that finishes this operation on the Service Bus resource.
     * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the transaction could not be rolled back.
     */
    public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction")));
        }
        if (Objects.isNull(transactionContext)) {
            return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
        } else if (Objects.isNull(transactionContext.getTransactionId())) {
            return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
        }

        return tracer.traceMono("ServiceBus.rollbackTransaction", connectionProcessor
            .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
            .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction(
                transactionContext.getTransactionId()))))
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
    }

    /**
     * Disposes of the consumer by closing the underlying links to the service.
     */
    @Override
    public void close() {
        if (isDisposed.get()) {
            return;
        }

        // Wait (bounded, 5s) for any in-flight settlement holding the completion lock before tearing down.
        try {
            boolean acquired = completionLock.tryAcquire(5, TimeUnit.SECONDS);
            if (!acquired) {
                LOGGER.info("Unable to obtain completion lock.");
            }
        } catch (InterruptedException e) {
            // NOTE(review): the interrupt status is not restored (Thread.currentThread().interrupt());
            // confirm whether swallowing the interrupt here is intentional.
            LOGGER.info("Unable to obtain completion lock.", e);
        }

        // getAndSet makes close() idempotent under concurrent callers — only the first one tears down.
        if (isDisposed.getAndSet(true)) {
            return;
        }

        LOGGER.info("Removing receiver links.");
        final ServiceBusAsyncConsumer disposed = consumer.getAndSet(null);
        if (disposed != null) {
            disposed.close();
        }

        // sessionManager is null for non-session receivers (see the non-session constructor).
        if (sessionManager != null) {
            sessionManager.close();
        }

        managementNodeLocks.close();
        renewalContainer.close();

        if (trackSettlementSequenceNumber != null) {
            try {
                trackSettlementSequenceNumber.close();
            } catch (Exception e) {
                LOGGER.info("Unable to close settlement sequence number subscription.", e);
            }
        }

        onClientClose.run();
    }

    /**
     * Gets the receiver options set by the user.
     *
     * @return receiver options set by user;
     */
    ReceiverOptions getReceiverOptions() {
        return receiverOptions;
    }

    /**
     * Gets whether or not the management node contains the message lock token and it has not expired. Lock tokens are
     * held by the management node when they are received from the management node or management operations are
     * performed using that {@code lockToken}.
     *
     * @param lockToken Lock token to check for.
     *
     * @return {@code true} if the management node contains the lock token and false otherwise.
     */
    private boolean isManagementToken(String lockToken) {
        return managementNodeLocks.containsUnexpired(lockToken);
    }

    /**
     * Settles a message (complete/abandon/defer/dead-letter) either through the receive link, the session manager,
     * or the management node, depending on how the message was obtained and how this receiver is configured.
     */
    private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus,
        String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify,
        ServiceBusTransactionContext transactionContext) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue())));
        } else if (Objects.isNull(message)) {
            return monoError(LOGGER, new NullPointerException("'message' cannot be null."));
        }

        final String lockToken = message.getLockToken();
        final String sessionId = message.getSessionId();

        // Settlement only makes sense in PEEK_LOCK; RECEIVE_AND_DELETE messages are already removed, and a
        // missing lock token indicates the message was peeked rather than received.
        if (receiverOptions.getReceiveMode() != ServiceBusReceiveMode.PEEK_LOCK) {
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format(
                "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus))));
        } else if (message.isSettled()) {
            return Mono.error(LOGGER.logExceptionAsError(
                new IllegalArgumentException("The message has either been deleted or already settled.")));
        } else if (message.getLockToken() == null) {
            final String errorMessage = "This operation is not supported for peeked messages. "
                + "Only messages received using receiveMessages() in PEEK_LOCK mode can be settled.";
            return Mono.error(
                LOGGER.logExceptionAsError(new UnsupportedOperationException(errorMessage))
            );
        }

        // Fall back to the receiver-level session id when the message itself carries none.
        final String sessionIdToUse;
        if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) {
            sessionIdToUse = receiverOptions.getSessionId();
        } else {
            sessionIdToUse = sessionId;
        }

        LOGGER.atVerbose()
            .addKeyValue(LOCK_TOKEN_KEY, lockToken)
            .addKeyValue(ENTITY_PATH_KEY, entityPath)
            .addKeyValue(SESSION_ID_KEY, sessionIdToUse)
            .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus)
            .log("Update started.");

        // Fallback path: settle via the management node, then mark the message settled and drop its renewals.
        final Mono<Void> performOnManagement = connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason,
                deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId),
                transactionContext))
            .then(Mono.fromRunnable(() -> {
                LOGGER.atInfo()
                    .addKeyValue(LOCK_TOKEN_KEY, lockToken)
                    .addKeyValue(ENTITY_PATH_KEY, entityPath)
                    .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus)
                    .log("Management node Update completed.");

                message.setIsSettled();
                managementNodeLocks.remove(lockToken);
                renewalContainer.remove(lockToken);
            }));

        Mono<Void> updateDispositionOperation;
        if (sessionManager != null) {
            // Session receiver: try the session manager first; fall back to the management node on failure.
            updateDispositionOperation = sessionManager.updateDisposition(lockToken, sessionId, dispositionStatus,
                propertiesToModify, deadLetterReason, deadLetterErrorDescription, transactionContext)
                .flatMap(isSuccess -> {
                    if (isSuccess) {
                        message.setIsSettled();
                        renewalContainer.remove(lockToken);
                        return Mono.empty();
                    }

                    LOGGER.info("Could not perform on session manger. Performing on management node.");
                    return performOnManagement;
                });
        } else {
            final ServiceBusAsyncConsumer existingConsumer = consumer.get();
            // Use the management node when it already owns the lock token or no receive link exists.
            if (isManagementToken(lockToken) || existingConsumer == null) {
                updateDispositionOperation = performOnManagement;
            } else {
                updateDispositionOperation = existingConsumer.updateDisposition(lockToken, dispositionStatus,
                    deadLetterReason, deadLetterErrorDescription, propertiesToModify, transactionContext)
                    .then(Mono.fromRunnable(() -> {
                        LOGGER.atVerbose()
                            .addKeyValue(LOCK_TOKEN_KEY, lockToken)
                            .addKeyValue(ENTITY_PATH_KEY, entityPath)
                            .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus)
                            .log("Update completed.");

                        message.setIsSettled();
                        renewalContainer.remove(lockToken);
                    }));
            }
        }

        // Wrap any non-ServiceBusException with an error source matching the attempted disposition.
        return instrumentation.instrumentSettlement(updateDispositionOperation, message, message.getContext(),
                dispositionStatus)
            .onErrorMap(throwable -> {
                if (throwable instanceof ServiceBusException) {
                    return throwable;
                }

                switch (dispositionStatus) {
                    case COMPLETED:
                        return new ServiceBusException(throwable, ServiceBusErrorSource.COMPLETE);
                    case ABANDONED:
                        return new ServiceBusException(throwable, ServiceBusErrorSource.ABANDON);
                    default:
                        // Covers DEFERRED, SUSPENDED and any other statuses — no dedicated error source here.
                        return new ServiceBusException(throwable, ServiceBusErrorSource.UNKNOWN);
                }
            });
    }

    /**
     * Gets the name of the receive link, if one exists for the given session (or for this receiver);
     * returns null when operations go through the management node instead.
     *
     * @return The name of the receive link, or null if it has not connected via a receive link.
     */
    private String getLinkName(String sessionId) {
        if (sessionManager != null && !CoreUtils.isNullOrEmpty(sessionId)) {
            return sessionManager.getLinkName(sessionId);
        } else if (!CoreUtils.isNullOrEmpty(sessionId) && !receiverOptions.isSessionReceiver()) {
            // A session id was supplied but this is not a session receiver — no matching link.
            return null;
        } else {
            final ServiceBusAsyncConsumer existing = consumer.get();
            return existing != null ? existing.getLinkName() : null;
        }
    }

    /**
     * Renews the lock of the given session via the management node.
     */
    Mono<OffsetDateTime> renewSessionLock(String sessionId) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock")));
        } else if (!receiverOptions.isSessionReceiver()) {
            return monoError(LOGGER, new IllegalStateException("Cannot renew session lock on a non-session receiver."));
        }

        final String linkName = sessionManager != null
            ? sessionManager.getLinkName(sessionId)
            : null;

        return tracer.traceMono("ServiceBus.renewSessionLock", connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(channel -> channel.renewSessionLock(sessionId, linkName)))
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK));
    }

    /**
     * Starts a background renewal operation that keeps the session lock alive for up to
     * {@code maxLockRenewalDuration}.
     */
    Mono<Void> renewSessionLock(String sessionId, Duration maxLockRenewalDuration) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock")));
        } else if (!receiverOptions.isSessionReceiver()) {
            return monoError(LOGGER, new IllegalStateException(
                "Cannot renew session lock on a non-session receiver."));
        } else if (maxLockRenewalDuration == null) {
            return monoError(LOGGER, new NullPointerException("'maxLockRenewalDuration' cannot be null."));
        } else if (maxLockRenewalDuration.isNegative()) {
            return monoError(LOGGER, new IllegalArgumentException(
                "'maxLockRenewalDuration' cannot be negative."));
        } else if (Objects.isNull(sessionId)) {
            return monoError(LOGGER, new NullPointerException("'sessionId' cannot be null."));
        } else if (sessionId.isEmpty()) {
            return monoError(LOGGER, new IllegalArgumentException("'sessionId' cannot be empty."));
        }

        final LockRenewalOperation operation = new LockRenewalOperation(sessionId, maxLockRenewalDuration, true,
            this::renewSessionLock);
        renewalContainer.addOrUpdate(sessionId, OffsetDateTime.now().plus(maxLockRenewalDuration), operation);

        return tracer.traceMono("ServiceBus.renewSessionLock", operation.getCompletionOperation())
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK));
    }

    /**
     * Sets the state blob of the given session via the management node.
     */
    Mono<Void> setSessionState(String sessionId, byte[] sessionState) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState")));
        } else if (!receiverOptions.isSessionReceiver()) {
            return monoError(LOGGER, new IllegalStateException("Cannot set session state on a non-session receiver."));
        }

        final String linkName = sessionManager != null
            ? sessionManager.getLinkName(sessionId)
            : null;

        return tracer.traceMono("ServiceBus.setSessionState", connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(channel -> channel.setSessionState(sessionId, sessionState, linkName)))
            .onErrorMap((err) -> mapError(err, ServiceBusErrorSource.RECEIVE));
    }

    /**
     * Gets the state blob of the given session, preferring the session manager when present.
     */
    Mono<byte[]> getSessionState(String sessionId) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState")));
        } else if (!receiverOptions.isSessionReceiver()) {
            return monoError(LOGGER, new IllegalStateException("Cannot get session state on a non-session receiver."));
        }

        Mono<byte[]> result;
        if (sessionManager != null) {
            result = sessionManager.getSessionState(sessionId);
        } else {
            result = connectionProcessor
                .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
                .flatMap(channel -> channel.getSessionState(sessionId, getLinkName(sessionId)));
        }

        // NOTE(review): the span is named "ServiceBus.setSessionState" in a *get* operation — presumably a
        // copy/paste slip for "ServiceBus.getSessionState"; confirm before changing.
        return tracer.traceMono("ServiceBus.setSessionState", result)
            .onErrorMap((err) -> mapError(err, ServiceBusErrorSource.RECEIVE));
    }

    ServiceBusReceiverInstrumentation getInstrumentation() {
        return instrumentation;
    }

    /**
     * Map the error to {@link ServiceBusException}, tagging it with the given error source;
     * errors that are already ServiceBusException pass through unchanged.
     */
    private Throwable mapError(Throwable throwable, ServiceBusErrorSource errorSource) {
        if (!(throwable instanceof ServiceBusException)) {
            return new ServiceBusException(throwable, errorSource);
        }
        return throwable;
    }

    boolean isConnectionClosed() {
        return this.connectionProcessor.isChannelClosed();
    }

    boolean isManagementNodeLocksClosed() {
        return this.managementNodeLocks.isClosed();
    }

    boolean isRenewalContainerClosed() {
        return this.renewalContainer.isClosed();
    }
}
My understanding is that, per the Reactive Streams specification (rule 2.13), null values must not flow through a Reactor chain — `onNext` is never signalled with null — so an explicit null check here is unnecessary.
private ServiceBusAsyncConsumer getOrCreateConsumer() { final ServiceBusAsyncConsumer existing = consumer.get(); if (existing != null) { return existing; } final String linkName = StringUtil.getRandomString(entityPath); LOGGER.atInfo() .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue(ENTITY_PATH_KEY, entityPath) .log("Creating consumer."); final Mono<ServiceBusReceiveLink> receiveLinkMono = connectionProcessor.flatMap(connection -> { if (receiverOptions.isSessionReceiver()) { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, receiverOptions.getSessionId()); } else { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier); } }).doOnNext(next -> { LOGGER.atVerbose() .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue(ENTITY_PATH_KEY, next.getEntityPath()) .addKeyValue("mode", receiverOptions.getReceiveMode()) .addKeyValue("isSessionEnabled", receiverOptions.isSessionReceiver()) .addKeyValue(ENTITY_TYPE_KEY, entityType) .log("Created consumer for Service Bus resource."); }); final Mono<ServiceBusReceiveLink> retryableReceiveLinkMono = RetryUtil.withRetry(receiveLinkMono.onErrorMap( RequestResponseChannelClosedException.class, e -> { return new AmqpException(true, e.getMessage(), e, null); }), connectionProcessor.getRetryOptions(), "Failed to create receive link " + linkName, true); final Flux<ServiceBusReceiveLink> receiveLinkFlux = retryableReceiveLinkMono .repeat() .filter(link -> link != null && !link.isDisposed()); final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions()); final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLinkFlux.subscribeWith( new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy)); final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor, messageSerializer, receiverOptions); 
if (consumer.compareAndSet(null, newConsumer)) { return newConsumer; } else { newConsumer.close(); return consumer.get(); } }
    /**
     * Returns the existing consumer or atomically creates one backed by a retried, repeating receive-link flux.
     * Thread-safe: a CAS on {@code consumer} guarantees concurrent callers share a single consumer.
     */
    private ServiceBusAsyncConsumer getOrCreateConsumer() {
        final ServiceBusAsyncConsumer existing = consumer.get();
        if (existing != null) {
            return existing;
        }

        final String linkName = StringUtil.getRandomString(entityPath);
        LOGGER.atInfo()
            .addKeyValue(LINK_NAME_KEY, linkName)
            .addKeyValue(ENTITY_PATH_KEY, entityPath)
            .log("Creating consumer.");

        // Session receivers pass the session id through to the link; others use the shorter overload.
        final Mono<ServiceBusReceiveLink> receiveLinkMono = connectionProcessor.flatMap(connection -> {
            if (receiverOptions.isSessionReceiver()) {
                return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                    null, entityType, identifier, receiverOptions.getSessionId());
            } else {
                return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                    null, entityType, identifier);
            }
        }).doOnNext(next -> {
            LOGGER.atVerbose()
                .addKeyValue(LINK_NAME_KEY, linkName)
                .addKeyValue(ENTITY_PATH_KEY, next.getEntityPath())
                .addKeyValue("mode", receiverOptions.getReceiveMode())
                .addKeyValue("isSessionEnabled", receiverOptions.isSessionReceiver())
                .addKeyValue(ENTITY_TYPE_KEY, entityType)
                .log("Created consumer for Service Bus resource.");
        });

        // A closed request/response channel is treated as transient so RetryUtil retries link creation.
        final Mono<ServiceBusReceiveLink> retryableReceiveLinkMono = RetryUtil.withRetry(receiveLinkMono.onErrorMap(
            RequestResponseChannelClosedException.class,
            e -> {
                return new AmqpException(true, e.getMessage(), e, null);
            }),
            connectionProcessor.getRetryOptions(), "Failed to create receive link " + linkName, true);

        // No null check needed: Reactive Streams rule 2.13 forbids null onNext signals.
        final Flux<ServiceBusReceiveLink> receiveLinkFlux = retryableReceiveLinkMono
            .repeat()
            .filter(link -> !link.isDisposed());

        final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions());
        final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLinkFlux.subscribeWith(
            new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy));
        final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor,
            messageSerializer, receiverOptions);

        // The CAS loser closes its freshly-built consumer and returns the winner's.
        if (consumer.compareAndSet(null, newConsumer)) {
            return newConsumer;
        } else {
            newConsumer.close();
            return consumer.get();
        }
    }
class ServiceBusReceiverAsyncClient implements AutoCloseable {
    private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions();
    // AMQP session name used for transaction coordination (create/commit/rollback).
    private static final String TRANSACTION_LINK_NAME = "coordinator";

    private static final ClientLogger LOGGER = new ClientLogger(ServiceBusReceiverAsyncClient.class);
    // Tracks running lock-renewal operations keyed by lock token / session id.
    private final LockContainer<LockRenewalOperation> renewalContainer;
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    // Lock tokens owned by the management node, with their expiration times.
    private final LockContainer<OffsetDateTime> managementNodeLocks;
    private final String fullyQualifiedNamespace;
    private final String entityPath;
    private final MessagingEntityType entityType;
    private final ReceiverOptions receiverOptions;
    private final ServiceBusConnectionProcessor connectionProcessor;
    private final ServiceBusReceiverInstrumentation instrumentation;
    private final ServiceBusTracer tracer;
    private final MessageSerializer messageSerializer;
    // Callback invoked once when this client closes.
    private final Runnable onClientClose;
    // Null for non-session receivers (see the constructor overloads).
    private final ServiceBusSessionManager sessionManager;
    // Guards close() against racing with an in-flight settlement.
    private final Semaphore completionLock = new Semaphore(1);
    private final String identifier;

    // Starts at -1, i.e. nothing peeked yet — presumably advanced by peek operations; usage not in this chunk.
    private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1);
    private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>();
    private final AutoCloseable trackSettlementSequenceNumber;

    /**
     * Creates a receiver that listens to a Service Bus resource.
     *
     * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource.
     * @param entityPath The name of the topic or queue.
     * @param entityType The type of the Service Bus resource.
     * @param receiverOptions Options when receiving messages.
     * @param connectionProcessor The AMQP connection to the Service Bus resource.
     * @param instrumentation ServiceBus tracing and metrics helper
     * @param messageSerializer Serializes and deserializes Service Bus messages.
     * @param onClientClose Operation to run when the client completes.
     */
    // Non-session constructor: sessionManager is left null and the caller-supplied identifier is used.
    ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
        ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
        ServiceBusReceiverInstrumentation instrumentation, MessageSerializer messageSerializer, Runnable onClientClose,
        String identifier) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
        this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
        // NOTE(review): message text has a misplaced quote ("'receiveOptions cannot be null.'") — kept verbatim.
        this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
        this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null.");
        this.instrumentation = Objects.requireNonNull(instrumentation, "'tracer' cannot be null");
        this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
        this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");

        this.managementNodeLocks = new LockContainer<>(cleanupInterval);
        // Expired renewal operations are closed and logged with their lock token.
        this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> {
            LOGGER.atVerbose()
                .addKeyValue(LOCK_TOKEN_KEY, renewal.getLockToken())
                .addKeyValue("status", renewal.getStatus())
                .log("Closing expired renewal operation.", renewal.getThrowable());
            renewal.close();
        });

        this.sessionManager = null;
        this.identifier = identifier;
        this.tracer = instrumentation.getTracer();
        this.trackSettlementSequenceNumber = instrumentation.startTrackingSettlementSequenceNumber();
    }

    // Session constructor: requires a session manager, whose identifier becomes this client's identifier.
    ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
        ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
        ServiceBusReceiverInstrumentation instrumentation, MessageSerializer messageSerializer, Runnable onClientClose,
        ServiceBusSessionManager sessionManager) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
        this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
        this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
        this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null.");
        this.instrumentation = Objects.requireNonNull(instrumentation, "'tracer' cannot be null");
        this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
        this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
        this.sessionManager = Objects.requireNonNull(sessionManager, "'sessionManager' cannot be null.");

        this.managementNodeLocks = new LockContainer<>(cleanupInterval);
        // Session variant logs expired renewals by session id (at info level, unlike the non-session ctor).
        this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> {
            LOGGER.atInfo()
                .addKeyValue(SESSION_ID_KEY, renewal.getSessionId())
                .addKeyValue("status", renewal.getStatus())
                .log("Closing expired renewal operation.", renewal.getThrowable());
            renewal.close();
        });

        this.identifier = sessionManager.getIdentifier();
        this.tracer = instrumentation.getTracer();
        this.trackSettlementSequenceNumber = instrumentation.startTrackingSettlementSequenceNumber();
    }

    /**
     * Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to
     * {@code {yournamespace}.servicebus.windows.net}.
     *
     * @return The fully qualified Service Bus namespace that the connection is associated with.
     */
    public String getFullyQualifiedNamespace() {
        return fullyQualifiedNamespace;
    }

    /**
     * Gets the Service Bus resource this client interacts with.
* * @return The Service Bus resource this client interacts with. */ public String getEntityPath() { return entityPath; } /** * Gets the SessionId of the session if this receiver is a session receiver. * * @return The SessionId or null if this is not a session receiver. */ public String getSessionId() { return receiverOptions.getSessionId(); } /** * Gets the identifier of the instance of {@link ServiceBusReceiverAsyncClient}. * * @return The identifier that can identify the instance of {@link ServiceBusReceiverAsyncClient}. */ public String getIdentifier() { return identifier; } /** * Abandons a {@link ServiceBusReceivedMessage message}. This will make the message available again for processing. * Abandoning a message will increase the delivery count on the message. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that completes when the Service Bus abandon operation completes. * * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * {@link ServiceBusReceiverAsyncClient * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the message could not be abandoned. * @throws IllegalArgumentException if the message has either been deleted or already settled. */ public Mono<Void> abandon(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.ABANDONED, null, null, null, null); } /** * Abandons a {@link ServiceBusReceivedMessage message} updates the message's properties. This will make the * message available again for processing. Abandoning a message will increase the delivery count on the message. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options The options to set while abandoning the message. * * @return A {@link Mono} that completes when the Service Bus operation finishes. 
* * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. * @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * {@link ServiceBusReceiverAsyncClient * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the message could not be abandoned. * @throws IllegalArgumentException if the message has either been deleted or already settled. */ public Mono<Void> abandon(ServiceBusReceivedMessage message, AbandonOptions options) { if (Objects.isNull(options)) { return monoError(LOGGER, new NullPointerException("'settlementOptions' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(LOGGER, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.ABANDONED, null, null, options.getPropertiesToModify(), options.getTransactionContext()); } /** * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the service. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that finishes when the message is completed on Service Bus. * * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * {@link ServiceBusReceiverAsyncClient * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the message could not be completed. * @throws IllegalArgumentException if the message has either been deleted or already settled. 
 */
public Mono<Void> complete(ServiceBusReceivedMessage message) {
    // Delegates to the shared disposition path with no dead-letter info, no property changes and no transaction.
    return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, null);
}

/**
 * Completes a {@link ServiceBusReceivedMessage message} with the given options. This will delete the message from
 * the service.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 * @param options Options used to complete the message.
 *
 * @return A {@link Mono} that finishes when the message is completed on Service Bus.
 *
 * @throws NullPointerException if {@code message} or {@code options} is null. Also if
 *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
 * @throws UnsupportedOperationException if the receiver was opened in
 *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} mode.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the message could not be completed.
 * @throws IllegalArgumentException if the message has either been deleted or already settled.
 */
public Mono<Void> complete(ServiceBusReceivedMessage message, CompleteOptions options) {
    // 'options' is validated here; 'message' itself is validated inside updateDisposition.
    if (Objects.isNull(options)) {
        return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
    } else if (!Objects.isNull(options.getTransactionContext())
        && Objects.isNull(options.getTransactionContext().getTransactionId())) {
        return monoError(LOGGER, new NullPointerException(
            "'options.transactionContext.transactionId' cannot be null."));
    }
    return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null,
        options.getTransactionContext());
}

/**
 * Defers a {@link ServiceBusReceivedMessage message}. This will move message into the deferred sub-queue.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 *
 * @return A {@link Mono} that completes when the Service Bus defer operation finishes.
 *
 * @throws NullPointerException if {@code message} is null.
 * @throws UnsupportedOperationException if the receiver was opened in
 *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} mode.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the message could not be deferred.
 * @throws IllegalArgumentException if the message has either been deleted or already settled.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-deferral">Message deferral</a>
 */
public Mono<Void> defer(ServiceBusReceivedMessage message) {
    return updateDisposition(message, DispositionStatus.DEFERRED, null, null, null, null);
}

/**
 * Defers a {@link ServiceBusReceivedMessage message} with the options set. This will move message into
 * the deferred sub-queue.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 * @param options Options used to defer the message.
 *
 * @return A {@link Mono} that completes when the defer operation finishes.
 *
 * @throws NullPointerException if {@code message} or {@code options} is null. Also if
 *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
 * @throws UnsupportedOperationException if the receiver was opened in
 *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} mode.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the message could not be deferred.
 * @throws IllegalArgumentException if the message has either been deleted or already settled.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-deferral">Message deferral</a>
 */
public Mono<Void> defer(ServiceBusReceivedMessage message, DeferOptions options) {
    if (Objects.isNull(options)) {
        return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
    } else if (!Objects.isNull(options.getTransactionContext())
        && Objects.isNull(options.getTransactionContext().getTransactionId())) {
        return monoError(LOGGER, new NullPointerException(
            "'options.transactionContext.transactionId' cannot be null."));
    }
    return updateDisposition(message, DispositionStatus.DEFERRED, null, null,
        options.getPropertiesToModify(), options.getTransactionContext());
}

/**
 * Moves a {@link ServiceBusReceivedMessage message} to the dead-letter sub-queue.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 *
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 *
 * @throws NullPointerException if {@code message} is null.
 * @throws UnsupportedOperationException if the receiver was opened in
 *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} mode.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the message could not be dead-lettered.
 * @throws IllegalArgumentException if the message has either been deleted or already settled.
 *
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-dead-letter-queues">Dead letter
 *     queues</a>
 */
public Mono<Void> deadLetter(ServiceBusReceivedMessage message) {
    // Uses the client-wide default dead-letter options (no reason/description/properties).
    return deadLetter(message, DEFAULT_DEAD_LETTER_OPTIONS);
}

/**
 * Moves a {@link ServiceBusReceivedMessage message} to the dead-letter sub-queue with the given options.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 * @param options Options used to dead-letter the message.
 *
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 *
 * @throws NullPointerException if {@code message} or {@code options} is null. Also if
 *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
 * @throws UnsupportedOperationException if the receiver was opened in
 *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} mode.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the message could not be dead-lettered.
 * @throws IllegalArgumentException if the message has either been deleted or already settled.
 *
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-dead-letter-queues">Dead letter
 *     queues</a>
 */
public Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions options) {
    if (Objects.isNull(options)) {
        return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
    } else if (!Objects.isNull(options.getTransactionContext())
        && Objects.isNull(options.getTransactionContext().getTransactionId())) {
        return monoError(LOGGER, new NullPointerException(
            "'options.transactionContext.transactionId' cannot be null."));
    }
    // SUSPENDED is the AMQP disposition backing the dead-letter operation.
    return updateDisposition(message, DispositionStatus.SUSPENDED, options.getDeadLetterReason(),
        options.getDeadLetterErrorDescription(), options.getPropertiesToModify(),
        options.getTransactionContext());
}

/**
 * Gets the state of the session if this receiver is a session receiver.
 *
 * @return The session state or an empty Mono if there is no state set for the session.
 * @throws IllegalStateException if the receiver is a non-session receiver or receiver is already closed.
 * @throws ServiceBusException if the session state could not be acquired.
 */
public Mono<byte[]> getSessionState() {
    return getSessionState(receiverOptions.getSessionId());
}

/**
 * Reads the next active message without changing the state of the receiver or the message source. The first call to
 * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent
 * message in the entity.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if an error occurs while peeking at the message.
* @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peekMessage() { return peekMessage(receiverOptions.getSessionId()); } /** * Reads the next active message without changing the state of the receiver or the message source. The first call to * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent * message in the entity. * * @param sessionId Session id of the message to peek from. {@code null} if there is no session. * * @return A peeked {@link ServiceBusReceivedMessage}. * @throws IllegalStateException if the receiver is disposed. * @throws ServiceBusException if an error occurs while peeking at the message. * @see <a href="https: */ Mono<ServiceBusReceivedMessage> peekMessage(String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek"))); } Mono<ServiceBusReceivedMessage> result = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> { final long sequence = lastPeekedSequenceNumber.get() + 1; LOGGER.atVerbose() .addKeyValue(SEQUENCE_NUMBER_KEY, sequence) .log("Peek message."); return channel.peek(sequence, sessionId, getLinkName(sessionId)); }) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)) .handle((message, sink) -> { final long current = lastPeekedSequenceNumber .updateAndGet(value -> Math.max(value, message.getSequenceNumber())); LOGGER.atVerbose() .addKeyValue(SEQUENCE_NUMBER_KEY, current) .log("Updating last peeked sequence number."); sink.next(message); }); return tracer.traceManagementReceive("ServiceBus.peekMessage", result, ServiceBusReceivedMessage::getContext); } /** * Starting from the given sequence number, reads next the active message without changing the state of the receiver * or the message source. * * @param sequenceNumber The sequence number from where to read the message. 
* * @return A peeked {@link ServiceBusReceivedMessage}. * * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if an error occurs while peeking at the message. * @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peekMessage(long sequenceNumber) { return peekMessage(sequenceNumber, receiverOptions.getSessionId()); } /** * Starting from the given sequence number, reads next the active message without changing the state of the receiver * or the message source. * * @param sequenceNumber The sequence number from where to read the message. * @param sessionId Session id of the message to peek from. {@code null} if there is no session. * * @return A peeked {@link ServiceBusReceivedMessage}. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if an error occurs while peeking at the message. * @see <a href="https: */ Mono<ServiceBusReceivedMessage> peekMessage(long sequenceNumber, String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt"))); } return tracer.traceManagementReceive("ServiceBus.peekMessage", connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)), ServiceBusReceivedMessage::getContext); } /** * Reads the next batch of active messages without changing the state of the receiver or the message source. * * @param maxMessages The number of messages. * * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked. * * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if an error occurs while peeking at messages. 
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages) {
    return tracer.traceSyncReceive("ServiceBus.peekMessages",
        peekMessages(maxMessages, receiverOptions.getSessionId()));
}

/**
 * Reads the next batch of active messages without changing the state of the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
 *
 * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage messages} that are peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if an error occurs while peeking at messages.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, String sessionId) {
    if (isDisposed.get()) {
        return fluxError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch")));
    }
    if (maxMessages <= 0) {
        return fluxError(LOGGER, new IllegalArgumentException("'maxMessages' is not positive."));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(node -> {
            // Continue from just beyond the highest sequence number previously peeked by this client.
            final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1;
            LOGGER.atVerbose().addKeyValue(SEQUENCE_NUMBER_KEY, nextSequenceNumber).log("Peek batch.");
            final Flux<ServiceBusReceivedMessage> messages = node.peek(nextSequenceNumber, sessionId,
                getLinkName(sessionId), maxMessages);
            // Side-channel that observes the last peeked message to advance lastPeekedSequenceNumber. The
            // switchIfEmpty placeholder keeps the mark unchanged when the peek returns nothing; it emits no
            // element downstream because the handle() only calls sink.complete().
            final Mono<ServiceBusReceivedMessage> handle = messages
                .switchIfEmpty(Mono.fromCallable(() -> {
                    ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(BinaryData
                        .fromBytes(new byte[0]));
                    emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get());
                    return emptyMessage;
                }))
                .last()
                .handle((last, sink) -> {
                    final long current = lastPeekedSequenceNumber
                        .updateAndGet(value -> Math.max(value, last.getSequenceNumber()));
                    LOGGER.atVerbose().addKeyValue(SEQUENCE_NUMBER_KEY, current)
                        .log("Last peeked sequence number in batch.");
                    sink.complete();
                });
            return Flux.merge(messages, handle);
        })
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
}

/**
 * Starting from the given sequence number, reads the next batch of active messages without changing the state of
 * the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 * @param sequenceNumber The sequence number from where to start reading messages.
 *
 * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} peeked.
 *
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if an error occurs while peeking at messages.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, long sequenceNumber) {
    return peekMessages(maxMessages, sequenceNumber, receiverOptions.getSessionId());
}

/**
 * Starting from the given sequence number, reads the next batch of active messages without changing the state of
 * the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 * @param sequenceNumber The sequence number from where to start reading messages.
 * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
 *
 * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage} peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if an error occurs while peeking at messages.
* @see <a href="https: */ Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, long sequenceNumber, String sessionId) { if (isDisposed.get()) { return fluxError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt"))); } if (maxMessages <= 0) { return fluxError(LOGGER, new IllegalArgumentException("'maxMessages' is not positive.")); } return tracer.traceSyncReceive("ServiceBus.peekMessages", connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMapMany(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages)) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE))); } /** * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity. * This Flux continuously receives messages from a Service Bus entity until either: * * <ul> * <li>The receiver is closed.</li> * <li>The subscription to the Flux is disposed.</li> * <li>A terminal signal from a downstream subscriber is propagated upstream (ie. {@link Flux * {@link Flux * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li> * </ul> * * @return An <b>infinite</b> stream of messages from the Service Bus entity. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if an error occurs while receiving messages. 
 */
public Flux<ServiceBusReceivedMessage> receiveMessages() {
    if (isDisposed.get()) {
        return fluxError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveMessages")));
    }
    // limitRate(1, 0) requests messages one at a time from the upstream pipeline.
    return receiveMessagesNoBackPressure().limitRate(1, 0);
}

// Receives without the per-message back-pressure applied by receiveMessages(); unwraps each
// ServiceBusMessageContext into its message (or error) while keeping the tracing span current.
@SuppressWarnings("try")
Flux<ServiceBusReceivedMessage> receiveMessagesNoBackPressure() {
    return receiveMessagesWithContext(0)
        .handle((serviceBusMessageContext, sink) -> {
            try (AutoCloseable scope = tracer.makeSpanCurrent(
                serviceBusMessageContext.getMessage().getContext())) {
                if (serviceBusMessageContext.hasError()) {
                    sink.error(serviceBusMessageContext.getThrowable());
                    return;
                }
                sink.next(serviceBusMessageContext.getMessage());
            } catch (Exception ex) {
                // Only failures from closing the tracing scope are expected here; they are logged, not propagated.
                LOGGER.verbose("Error disposing scope", ex);
            }
        });
}

/**
 * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity.
 * This Flux continuously receives messages from a Service Bus entity until either:
 *
 * <ul>
 * <li>The receiver is closed.</li>
 * <li>The subscription to the Flux is disposed.</li>
 * <li>A terminal signal from a downstream subscriber is propagated upstream (ie. {@link Flux#take(long)} or
 * {@link Flux#take(Duration)}).</li>
 * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li>
 * </ul>
 *
 * @return An <b>infinite</b> stream of messages from the Service Bus entity.
 */
Flux<ServiceBusMessageContext> receiveMessagesWithContext() {
    return receiveMessagesWithContext(1);
}

// Builds the receive pipeline: source (session manager or consumer link) -> tracing -> optional auto lock
// renewal -> optional auto-complete/abandon -> optional rate limiting. highTide <= 0 disables rate limiting.
Flux<ServiceBusMessageContext> receiveMessagesWithContext(int highTide) {
    final Flux<ServiceBusMessageContext> messageFlux = sessionManager != null
        ? sessionManager.receive()
        : getOrCreateConsumer().receive().map(ServiceBusMessageContext::new);
    final Flux<ServiceBusMessageContext> messageFluxWithTracing = new FluxTrace(messageFlux, instrumentation);
    final Flux<ServiceBusMessageContext> withAutoLockRenewal;
    // Auto lock renewal applies only to non-session receivers here; session renewal is handled elsewhere.
    if (!receiverOptions.isSessionReceiver() && receiverOptions.isAutoLockRenewEnabled()) {
        withAutoLockRenewal = new FluxAutoLockRenew(messageFluxWithTracing, receiverOptions,
            renewalContainer, this::renewMessageLock);
    } else {
        withAutoLockRenewal = messageFluxWithTracing;
    }
    Flux<ServiceBusMessageContext> result;
    if (receiverOptions.isEnableAutoComplete()) {
        result = new FluxAutoComplete(withAutoLockRenewal, completionLock,
            context -> context.getMessage() != null ? complete(context.getMessage()) : Mono.empty(),
            context -> context.getMessage() != null ? abandon(context.getMessage()) : Mono.empty());
    } else {
        result = withAutoLockRenewal;
    }
    if (highTide > 0) {
        result = result.limitRate(highTide, 0);
    }
    return result
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
}

/**
 * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
 * sequence number.
 *
 * @param sequenceNumber The {@link ServiceBusReceivedMessage#getSequenceNumber() sequence number} of the
 *     message.
 *
 * @return A deferred message with the matching {@code sequenceNumber}.
 *
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if deferred message cannot be received.
 */
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) {
    return receiveDeferredMessage(sequenceNumber, receiverOptions.getSessionId());
}

/**
 * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
 * sequence number.
 *
 * @param sequenceNumber The {@link ServiceBusReceivedMessage#getSequenceNumber() sequence number} of the
 *     message.
 * @param sessionId Session id of the deferred message. {@code null} if there is no session.
* * @return A deferred message with the matching {@code sequenceNumber}. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if deferred message cannot be received. */ Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessage"))); } return tracer.traceManagementReceive("ServiceBus.receiveDeferredMessage", connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId, getLinkName(sessionId), Collections.singleton(sequenceNumber)).last()) .map(receivedMessage -> { if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) { return receivedMessage; } if (receiverOptions.getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) { receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(), receivedMessage.getLockedUntil(), receivedMessage.getLockedUntil())); } return receivedMessage; }) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)), ServiceBusReceivedMessage::getContext); } /** * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received * by using sequence number. * * @param sequenceNumbers The sequence numbers of the deferred messages. * * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}. * * @throws NullPointerException if {@code sequenceNumbers} is null. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if deferred messages cannot be received. 
*/ public Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers) { return tracer.traceSyncReceive("ServiceBus.receiveDeferredMessages", receiveDeferredMessages(sequenceNumbers, receiverOptions.getSessionId())); } /** * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received * by using sequence number. * * @param sequenceNumbers The sequence numbers of the deferred messages. * @param sessionId Session id of the deferred messages. {@code null} if there is no session. * * @return An {@link IterableStream} of deferred {@link ServiceBusReceivedMessage messages}. * @throws IllegalStateException if receiver is already disposed. * @throws NullPointerException if {@code sequenceNumbers} is null. * @throws ServiceBusException if deferred message cannot be received. */ Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers, String sessionId) { if (isDisposed.get()) { return fluxError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch"))); } if (sequenceNumbers == null) { return fluxError(LOGGER, new NullPointerException("'sequenceNumbers' cannot be null")); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId, getLinkName(sessionId), sequenceNumbers)) .map(receivedMessage -> { if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) { return receivedMessage; } if (receiverOptions.getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) { receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(), receivedMessage.getLockedUntil(), receivedMessage.getLockedUntil())); } return receivedMessage; }) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Package-private method that releases a 
message. * * @param message Message to release. * @return Mono that completes when message is successfully released. */ Mono<Void> release(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.RELEASED, null, null, null, null); } /** * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the * entity. When a message is received in {@link ServiceBusReceiveMode * server for this receiver instance for a duration as specified during the entity creation (LockDuration). If * processing of the message requires longer than this duration, the lock needs to be renewed. For each renewal, the * lock is reset to the entity's LockDuration value. * * @param message The {@link ServiceBusReceivedMessage} to perform auto-lock renewal. * * @return The new expiration time for the message. * * @throws NullPointerException if {@code message} or {@code message.getLockToken()} is null. * @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * @throws IllegalStateException if the receiver is a session receiver or receiver is already disposed. * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value. 
*/ public Mono<OffsetDateTime> renewMessageLock(ServiceBusReceivedMessage message) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock"))); } else if (Objects.isNull(message)) { return monoError(LOGGER, new NullPointerException("'message' cannot be null.")); } else if (Objects.isNull(message.getLockToken())) { return monoError(LOGGER, new NullPointerException("'message.getLockToken()' cannot be null.")); } else if (message.getLockToken().isEmpty()) { return monoError(LOGGER, new IllegalArgumentException("'message.getLockToken()' cannot be empty.")); } else if (receiverOptions.isSessionReceiver()) { final String errorMessage = "Renewing message lock is an invalid operation when working with sessions."; return monoError(LOGGER, new IllegalStateException(errorMessage)); } return tracer.traceMonoWithLink("ServiceBus.renewMessageLock", renewMessageLock(message.getLockToken()), message, message.getContext()) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } /** * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the * entity. * * @param lockToken to be renewed. * * @return The new expiration time for the message. * @throws IllegalStateException if receiver is already disposed. */ Mono<OffsetDateTime> renewMessageLock(String lockToken) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(serviceBusManagementNode -> serviceBusManagementNode.renewMessageLock(lockToken, getLinkName(null))) .map(offsetDateTime -> managementNodeLocks.addOrUpdate(lockToken, offsetDateTime, offsetDateTime)); } /** * Starts the auto lock renewal for a {@link ServiceBusReceivedMessage message}. 
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 * @param maxLockRenewalDuration Maximum duration to keep renewing the lock token.
 *
 * @return A Mono that completes when the message renewal operation has completed up until
 *     {@code maxLockRenewalDuration}.
 *
 * @throws NullPointerException if {@code message}, {@code message.getLockToken()}, or
 *     {@code maxLockRenewalDuration} is null.
 * @throws IllegalStateException if the receiver is a session receiver or the receiver is disposed.
 * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value.
 * @throws ServiceBusException If the message lock cannot be renewed.
 */
public Mono<Void> renewMessageLock(ServiceBusReceivedMessage message, Duration maxLockRenewalDuration) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewMessageLock")));
    } else if (Objects.isNull(message)) {
        return monoError(LOGGER, new NullPointerException("'message' cannot be null."));
    } else if (Objects.isNull(message.getLockToken())) {
        return monoError(LOGGER, new NullPointerException("'message.getLockToken()' cannot be null."));
    } else if (message.getLockToken().isEmpty()) {
        return monoError(LOGGER, new IllegalArgumentException("'message.getLockToken()' cannot be empty."));
    } else if (receiverOptions.isSessionReceiver()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken())));
    } else if (maxLockRenewalDuration == null) {
        return monoError(LOGGER, new NullPointerException("'maxLockRenewalDuration' cannot be null."));
    } else if (maxLockRenewalDuration.isNegative()) {
        return monoError(LOGGER, new IllegalArgumentException("'maxLockRenewalDuration' cannot be negative."));
    }
    // The operation repeatedly invokes the single-shot renewMessageLock(message) until the duration elapses.
    final LockRenewalOperation operation = new LockRenewalOperation(message.getLockToken(),
        maxLockRenewalDuration, false, ignored -> renewMessageLock(message));
    // Registered in the container so the renewal is cancelled when the client closes.
    renewalContainer.addOrUpdate(message.getLockToken(), OffsetDateTime.now().plus(maxLockRenewalDuration),
        operation);
    return tracer.traceMonoWithLink("ServiceBus.renewMessageLock", operation.getCompletionOperation(),
        message, message.getContext())
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK));
}

/**
 * Renews the session lock if this receiver is a session receiver.
 *
 * @return The next expiration time for the session lock.
 * @throws IllegalStateException if the receiver is a non-session receiver or if receiver is already disposed.
 * @throws ServiceBusException if the session lock cannot be renewed.
 */
public Mono<OffsetDateTime> renewSessionLock() {
    return renewSessionLock(receiverOptions.getSessionId());
}

/**
 * Starts the auto lock renewal for the session this receiver works for.
 *
 * @param maxLockRenewalDuration Maximum duration to keep renewing the session lock.
 *
 * @return A lock renewal operation for the message.
 *
 * @throws NullPointerException if {@code sessionId} or {@code maxLockRenewalDuration} is null.
 * @throws IllegalStateException if the receiver is a non-session receiver or the receiver is disposed.
 * @throws ServiceBusException if the session lock renewal operation cannot be started.
 * @throws IllegalArgumentException if {@code sessionId} is an empty string or {@code maxLockRenewalDuration} is
 *     negative.
 */
public Mono<Void> renewSessionLock(Duration maxLockRenewalDuration) {
    return this.renewSessionLock(receiverOptions.getSessionId(), maxLockRenewalDuration);
}

/**
 * Sets the state of the session this receiver works for.
 *
 * @param sessionState State to set on the session.
 *
 * @return A Mono that completes when the session is set
 * @throws IllegalStateException if the receiver is a non-session receiver or receiver is already disposed.
 * @throws ServiceBusException if the session state cannot be set.
*/ public Mono<Void> setSessionState(byte[] sessionState) { return this.setSessionState(receiverOptions.getSessionId(), sessionState); } /** * Starts a new service side transaction. The {@link ServiceBusTransactionContext transaction context} should be * passed to all operations that needs to be in this transaction. * * <p><strong>Creating and using a transaction</strong></p> * <!-- src_embed com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * <pre> * & * & * & * Mono&lt;ServiceBusTransactionContext&gt; transactionContext = receiver.createTransaction& * .cache& * error -&gt; Duration.ZERO, * & * * transactionContext.flatMap& * & * Mono&lt;Void&gt; operations = Mono.when& * receiver.receiveDeferredMessage& * receiver.complete& * receiver.abandon& * * & * return operations.flatMap& * & * </pre> * <!-- end com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * * @return The {@link Mono} that finishes this operation on service bus resource. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if a transaction cannot be created. */ public Mono<ServiceBusTransactionContext> createTransaction() { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction"))); } return tracer.traceMono("ServiceBus.commitTransaction", connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.createTransaction()) .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId()))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Commits the transaction and all the operations associated with it. 
* * <p><strong>Creating and using a transaction</strong></p> * <!-- src_embed com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * <pre> * & * & * & * Mono&lt;ServiceBusTransactionContext&gt; transactionContext = receiver.createTransaction& * .cache& * error -&gt; Duration.ZERO, * & * * transactionContext.flatMap& * & * Mono&lt;Void&gt; operations = Mono.when& * receiver.receiveDeferredMessage& * receiver.complete& * receiver.abandon& * * & * return operations.flatMap& * & * </pre> * <!-- end com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * * @param transactionContext The transaction to be commit. * * @return The {@link Mono} that finishes this operation on service bus resource. * * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the transaction could not be committed. */ public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction"))); } if (Objects.isNull(transactionContext)) { return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return tracer.traceMono("ServiceBus.commitTransaction", connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction( transactionContext.getTransactionId())))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Rollbacks the transaction given and all operations associated with it. 
* * <p><strong>Creating and using a transaction</strong></p> * <!-- src_embed com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * <pre> * & * & * & * Mono&lt;ServiceBusTransactionContext&gt; transactionContext = receiver.createTransaction& * .cache& * error -&gt; Duration.ZERO, * & * * transactionContext.flatMap& * & * Mono&lt;Void&gt; operations = Mono.when& * receiver.receiveDeferredMessage& * receiver.complete& * receiver.abandon& * * & * return operations.flatMap& * & * </pre> * <!-- end com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * * @param transactionContext The transaction to rollback. * * @return The {@link Mono} that finishes this operation on service bus resource. * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the transaction could not be rolled back. */ public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction"))); } if (Objects.isNull(transactionContext)) { return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return tracer.traceMono("ServiceBus.rollbackTransaction", connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction( transactionContext.getTransactionId())))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Disposes of the consumer by closing the underlying links to the service. 
*/ @Override public void close() { if (isDisposed.get()) { return; } try { boolean acquired = completionLock.tryAcquire(5, TimeUnit.SECONDS); if (!acquired) { LOGGER.info("Unable to obtain completion lock."); } } catch (InterruptedException e) { LOGGER.info("Unable to obtain completion lock.", e); } if (isDisposed.getAndSet(true)) { return; } LOGGER.info("Removing receiver links."); final ServiceBusAsyncConsumer disposed = consumer.getAndSet(null); if (disposed != null) { disposed.close(); } if (sessionManager != null) { sessionManager.close(); } managementNodeLocks.close(); renewalContainer.close(); if (trackSettlementSequenceNumber != null) { try { trackSettlementSequenceNumber.close(); } catch (Exception e) { LOGGER.info("Unable to close settlement sequence number subscription.", e); } } onClientClose.run(); } /** * @return receiver options set by user; */ ReceiverOptions getReceiverOptions() { return receiverOptions; } /** * Gets whether or not the management node contains the message lock token and it has not expired. Lock tokens are * held by the management node when they are received from the management node or management operations are * performed using that {@code lockToken}. * * @param lockToken Lock token to check for. * * @return {@code true} if the management node contains the lock token and false otherwise. 
*/ private boolean isManagementToken(String lockToken) { return managementNodeLocks.containsUnexpired(lockToken); } private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus, String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue()))); } else if (Objects.isNull(message)) { return monoError(LOGGER, new NullPointerException("'message' cannot be null.")); } final String lockToken = message.getLockToken(); final String sessionId = message.getSessionId(); if (receiverOptions.getReceiveMode() != ServiceBusReceiveMode.PEEK_LOCK) { return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format( "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus)))); } else if (message.isSettled()) { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("The message has either been deleted or already settled."))); } else if (message.getLockToken() == null) { final String errorMessage = "This operation is not supported for peeked messages. 
" + "Only messages received using receiveMessages() in PEEK_LOCK mode can be settled."; return Mono.error( LOGGER.logExceptionAsError(new UnsupportedOperationException(errorMessage)) ); } final String sessionIdToUse; if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) { sessionIdToUse = receiverOptions.getSessionId(); } else { sessionIdToUse = sessionId; } LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, lockToken) .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(SESSION_ID_KEY, sessionIdToUse) .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus) .log("Update started."); final Mono<Void> performOnManagement = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId), transactionContext)) .then(Mono.fromRunnable(() -> { LOGGER.atInfo() .addKeyValue(LOCK_TOKEN_KEY, lockToken) .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus) .log("Management node Update completed."); message.setIsSettled(); managementNodeLocks.remove(lockToken); renewalContainer.remove(lockToken); })); Mono<Void> updateDispositionOperation; if (sessionManager != null) { updateDispositionOperation = sessionManager.updateDisposition(lockToken, sessionId, dispositionStatus, propertiesToModify, deadLetterReason, deadLetterErrorDescription, transactionContext) .flatMap(isSuccess -> { if (isSuccess) { message.setIsSettled(); renewalContainer.remove(lockToken); return Mono.empty(); } LOGGER.info("Could not perform on session manger. 
Performing on management node."); return performOnManagement; }); } else { final ServiceBusAsyncConsumer existingConsumer = consumer.get(); if (isManagementToken(lockToken) || existingConsumer == null) { updateDispositionOperation = performOnManagement; } else { updateDispositionOperation = existingConsumer.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, transactionContext) .then(Mono.fromRunnable(() -> { LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, lockToken) .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus) .log("Update completed."); message.setIsSettled(); renewalContainer.remove(lockToken); })); } } return instrumentation.instrumentSettlement(updateDispositionOperation, message, message.getContext(), dispositionStatus) .onErrorMap(throwable -> { if (throwable instanceof ServiceBusException) { return throwable; } switch (dispositionStatus) { case COMPLETED: return new ServiceBusException(throwable, ServiceBusErrorSource.COMPLETE); case ABANDONED: return new ServiceBusException(throwable, ServiceBusErrorSource.ABANDON); default: return new ServiceBusException(throwable, ServiceBusErrorSource.UNKNOWN); } }); } /** * If the receiver has not connected via {@link * through the management node. * * @return The name of the receive link, or null of it has not connected via a receive link. */ private String getLinkName(String sessionId) { if (sessionManager != null && !CoreUtils.isNullOrEmpty(sessionId)) { return sessionManager.getLinkName(sessionId); } else if (!CoreUtils.isNullOrEmpty(sessionId) && !receiverOptions.isSessionReceiver()) { return null; } else { final ServiceBusAsyncConsumer existing = consumer.get(); return existing != null ? 
existing.getLinkName() : null; } } Mono<OffsetDateTime> renewSessionLock(String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot renew session lock on a non-session receiver.")); } final String linkName = sessionManager != null ? sessionManager.getLinkName(sessionId) : null; return tracer.traceMono("ServiceBus.renewSessionLock", connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.renewSessionLock(sessionId, linkName))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> renewSessionLock(String sessionId, Duration maxLockRenewalDuration) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException( "Cannot renew session lock on a non-session receiver.")); } else if (maxLockRenewalDuration == null) { return monoError(LOGGER, new NullPointerException("'maxLockRenewalDuration' cannot be null.")); } else if (maxLockRenewalDuration.isNegative()) { return monoError(LOGGER, new IllegalArgumentException( "'maxLockRenewalDuration' cannot be negative.")); } else if (Objects.isNull(sessionId)) { return monoError(LOGGER, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(LOGGER, new IllegalArgumentException("'sessionId' cannot be empty.")); } final LockRenewalOperation operation = new LockRenewalOperation(sessionId, maxLockRenewalDuration, true, this::renewSessionLock); renewalContainer.addOrUpdate(sessionId, OffsetDateTime.now().plus(maxLockRenewalDuration), operation); return 
tracer.traceMono("ServiceBus.renewSessionLock", operation.getCompletionOperation()) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> setSessionState(String sessionId, byte[] sessionState) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot set session state on a non-session receiver.")); } final String linkName = sessionManager != null ? sessionManager.getLinkName(sessionId) : null; return tracer.traceMono("ServiceBus.setSessionState", connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.setSessionState(sessionId, sessionState, linkName))) .onErrorMap((err) -> mapError(err, ServiceBusErrorSource.RECEIVE)); } Mono<byte[]> getSessionState(String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot get session state on a non-session receiver.")); } Mono<byte[]> result; if (sessionManager != null) { result = sessionManager.getSessionState(sessionId); } else { result = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.getSessionState(sessionId, getLinkName(sessionId))); } return tracer.traceMono("ServiceBus.setSessionState", result) .onErrorMap((err) -> mapError(err, ServiceBusErrorSource.RECEIVE)); } ServiceBusReceiverInstrumentation getInstrumentation() { return instrumentation; } /** * Map the error to {@link ServiceBusException} */ private Throwable mapError(Throwable throwable, ServiceBusErrorSource errorSource) { if (!(throwable instanceof 
ServiceBusException)) { return new ServiceBusException(throwable, errorSource); } return throwable; } boolean isConnectionClosed() { return this.connectionProcessor.isChannelClosed(); } boolean isManagementNodeLocksClosed() { return this.managementNodeLocks.isClosed(); } boolean isRenewalContainerClosed() { return this.renewalContainer.isClosed(); } }
class ServiceBusReceiverAsyncClient implements AutoCloseable { private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions(); private static final String TRANSACTION_LINK_NAME = "coordinator"; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusReceiverAsyncClient.class); private final LockContainer<LockRenewalOperation> renewalContainer; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final LockContainer<OffsetDateTime> managementNodeLocks; private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final ServiceBusReceiverInstrumentation instrumentation; private final ServiceBusTracer tracer; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager sessionManager; private final Semaphore completionLock = new Semaphore(1); private final String identifier; private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1); private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>(); private final AutoCloseable trackSettlementSequenceNumber; /** * Creates a receiver that listens to a Service Bus resource. * * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource. * @param entityPath The name of the topic or queue. * @param entityType The type of the Service Bus resource. * @param receiverOptions Options when receiving messages. * @param connectionProcessor The AMQP connection to the Service Bus resource. * @param instrumentation ServiceBus tracing and metrics helper * @param messageSerializer Serializes and deserializes Service Bus messages. * @param onClientClose Operation to run when the client completes. 
*/ ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval, ServiceBusReceiverInstrumentation instrumentation, MessageSerializer messageSerializer, Runnable onClientClose, String identifier) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.instrumentation = Objects.requireNonNull(instrumentation, "'tracer' cannot be null"); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.managementNodeLocks = new LockContainer<>(cleanupInterval); this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> { LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, renewal.getLockToken()) .addKeyValue("status", renewal.getStatus()) .log("Closing expired renewal operation.", renewal.getThrowable()); renewal.close(); }); this.sessionManager = null; this.identifier = identifier; this.tracer = instrumentation.getTracer(); this.trackSettlementSequenceNumber = instrumentation.startTrackingSettlementSequenceNumber(); } ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval, ServiceBusReceiverInstrumentation instrumentation, 
MessageSerializer messageSerializer, Runnable onClientClose, ServiceBusSessionManager sessionManager) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.instrumentation = Objects.requireNonNull(instrumentation, "'tracer' cannot be null"); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.sessionManager = Objects.requireNonNull(sessionManager, "'sessionManager' cannot be null."); this.managementNodeLocks = new LockContainer<>(cleanupInterval); this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> { LOGGER.atInfo() .addKeyValue(SESSION_ID_KEY, renewal.getSessionId()) .addKeyValue("status", renewal.getStatus()) .log("Closing expired renewal operation.", renewal.getThrowable()); renewal.close(); }); this.identifier = sessionManager.getIdentifier(); this.tracer = instrumentation.getTracer(); this.trackSettlementSequenceNumber = instrumentation.startTrackingSettlementSequenceNumber(); } /** * Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Service Bus namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Service Bus resource this client interacts with. 
 *
 * @return The Service Bus resource this client interacts with.
 */
public String getEntityPath() {
    return entityPath;
}

/**
 * Gets the SessionId of the session if this receiver is a session receiver.
 *
 * @return The SessionId or null if this is not a session receiver.
 */
public String getSessionId() {
    return receiverOptions.getSessionId();
}

/**
 * Gets the identifier of the instance of {@link ServiceBusReceiverAsyncClient}.
 *
 * @return The identifier that can identify the instance of {@link ServiceBusReceiverAsyncClient}.
 */
public String getIdentifier() {
    return identifier;
}

/**
 * Abandons a {@link ServiceBusReceivedMessage message}. This will make the message available again for processing.
 * Abandoning a message will increase the delivery count on the message.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 *
 * @return A {@link Mono} that completes when the Service Bus abandon operation completes.
 *
 * @throws NullPointerException if {@code message} is null.
 * @throws UnsupportedOperationException if the receiver was opened in
 *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the message could not be abandoned.
 * @throws IllegalArgumentException if the message has either been deleted or already settled.
 */
public Mono<Void> abandon(ServiceBusReceivedMessage message) {
    // Delegates to the shared settlement path with the ABANDONED disposition and no extra options.
    return updateDisposition(message, DispositionStatus.ABANDONED, null, null, null, null);
}

/**
 * Abandons a {@link ServiceBusReceivedMessage message} updates the message's properties. This will make the
 * message available again for processing. Abandoning a message will increase the delivery count on the message.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 * @param options The options to set while abandoning the message.
 *
 * @return A {@link Mono} that completes when the Service Bus operation finishes.
* * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. * @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * {@link ServiceBusReceiverAsyncClient * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the message could not be abandoned. * @throws IllegalArgumentException if the message has either been deleted or already settled. */ public Mono<Void> abandon(ServiceBusReceivedMessage message, AbandonOptions options) { if (Objects.isNull(options)) { return monoError(LOGGER, new NullPointerException("'settlementOptions' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(LOGGER, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.ABANDONED, null, null, options.getPropertiesToModify(), options.getTransactionContext()); } /** * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the service. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that finishes when the message is completed on Service Bus. * * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * {@link ServiceBusReceiverAsyncClient * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the message could not be completed. * @throws IllegalArgumentException if the message has either been deleted or already settled. 
*/ public Mono<Void> complete(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, null); } /** * Completes a {@link ServiceBusReceivedMessage message} with the given options. This will delete the message from * the service. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options Options used to complete the message. * * @return A {@link Mono} that finishes when the message is completed on Service Bus. * * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. * @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * {@link ServiceBusReceiverAsyncClient * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the message could not be completed. * @throws IllegalArgumentException if the message has either been deleted or already settled. */ public Mono<Void> complete(ServiceBusReceivedMessage message, CompleteOptions options) { if (Objects.isNull(options)) { return monoError(LOGGER, new NullPointerException("'options' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(LOGGER, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, options.getTransactionContext()); } /** * Defers a {@link ServiceBusReceivedMessage message}. This will move message into the deferred sub-queue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that completes when the Service Bus defer operation finishes. * * @throws NullPointerException if {@code message} is null. 
* @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * {@link ServiceBusReceiverAsyncClient * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the message could not be deferred. * @throws IllegalArgumentException if the message has either been deleted or already settled. * @see <a href="https: */ public Mono<Void> defer(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.DEFERRED, null, null, null, null); } /** * Defers a {@link ServiceBusReceivedMessage message} with the options set. This will move message into * the deferred sub-queue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options Options used to defer the message. * * @return A {@link Mono} that completes when the defer operation finishes. * * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. * @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * {@link ServiceBusReceiverAsyncClient * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the message could not be deferred. * @throws IllegalArgumentException if the message has either been deleted or already settled. 
* @see <a href="https: */ public Mono<Void> defer(ServiceBusReceivedMessage message, DeferOptions options) { if (Objects.isNull(options)) { return monoError(LOGGER, new NullPointerException("'options' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(LOGGER, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.DEFERRED, null, null, options.getPropertiesToModify(), options.getTransactionContext()); } /** * Moves a {@link ServiceBusReceivedMessage message} to the dead-letter sub-queue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that completes when the dead letter operation finishes. * * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * {@link ServiceBusReceiverAsyncClient * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the message could not be dead-lettered. * @throws IllegalArgumentException if the message has either been deleted or already settled. * * @see <a href="https: * queues</a> */ public Mono<Void> deadLetter(ServiceBusReceivedMessage message) { return deadLetter(message, DEFAULT_DEAD_LETTER_OPTIONS); } /** * Moves a {@link ServiceBusReceivedMessage message} to the dead-letter sub-queue with the given options. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options Options used to dead-letter the message. * * @return A {@link Mono} that completes when the dead letter operation finishes. * * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. 
* @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * {@link ServiceBusReceiverAsyncClient * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the message could not be dead-lettered. * @throws IllegalArgumentException if the message has either been deleted or already settled. * * @see <a href="https: * queues</a> */ public Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions options) { if (Objects.isNull(options)) { return monoError(LOGGER, new NullPointerException("'options' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(LOGGER, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.SUSPENDED, options.getDeadLetterReason(), options.getDeadLetterErrorDescription(), options.getPropertiesToModify(), options.getTransactionContext()); } /** * Gets the state of the session if this receiver is a session receiver. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver or receiver is already closed. * @throws ServiceBusException if the session state could not be acquired. */ public Mono<byte[]> getSessionState() { return getSessionState(receiverOptions.getSessionId()); } /** * Reads the next active message without changing the state of the receiver or the message source. The first call to * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent * message in the entity. * * @return A peeked {@link ServiceBusReceivedMessage}. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if an error occurs while peeking at the message. 
* @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peekMessage() { return peekMessage(receiverOptions.getSessionId()); } /** * Reads the next active message without changing the state of the receiver or the message source. The first call to * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent * message in the entity. * * @param sessionId Session id of the message to peek from. {@code null} if there is no session. * * @return A peeked {@link ServiceBusReceivedMessage}. * @throws IllegalStateException if the receiver is disposed. * @throws ServiceBusException if an error occurs while peeking at the message. * @see <a href="https: */ Mono<ServiceBusReceivedMessage> peekMessage(String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek"))); } Mono<ServiceBusReceivedMessage> result = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> { final long sequence = lastPeekedSequenceNumber.get() + 1; LOGGER.atVerbose() .addKeyValue(SEQUENCE_NUMBER_KEY, sequence) .log("Peek message."); return channel.peek(sequence, sessionId, getLinkName(sessionId)); }) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)) .handle((message, sink) -> { final long current = lastPeekedSequenceNumber .updateAndGet(value -> Math.max(value, message.getSequenceNumber())); LOGGER.atVerbose() .addKeyValue(SEQUENCE_NUMBER_KEY, current) .log("Updating last peeked sequence number."); sink.next(message); }); return tracer.traceManagementReceive("ServiceBus.peekMessage", result, ServiceBusReceivedMessage::getContext); } /** * Starting from the given sequence number, reads next the active message without changing the state of the receiver * or the message source. * * @param sequenceNumber The sequence number from where to read the message. 
* * @return A peeked {@link ServiceBusReceivedMessage}. * * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if an error occurs while peeking at the message. * @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peekMessage(long sequenceNumber) { return peekMessage(sequenceNumber, receiverOptions.getSessionId()); } /** * Starting from the given sequence number, reads next the active message without changing the state of the receiver * or the message source. * * @param sequenceNumber The sequence number from where to read the message. * @param sessionId Session id of the message to peek from. {@code null} if there is no session. * * @return A peeked {@link ServiceBusReceivedMessage}. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if an error occurs while peeking at the message. * @see <a href="https: */ Mono<ServiceBusReceivedMessage> peekMessage(long sequenceNumber, String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt"))); } return tracer.traceManagementReceive("ServiceBus.peekMessage", connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)), ServiceBusReceivedMessage::getContext); } /** * Reads the next batch of active messages without changing the state of the receiver or the message source. * * @param maxMessages The number of messages. * * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked. * * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if an error occurs while peeking at messages. 
 * @see related Service Bus message browsing documentation.
 */
public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages) {
    return tracer.traceSyncReceive("ServiceBus.peekMessages",
        peekMessages(maxMessages, receiverOptions.getSessionId()));
}

/**
 * Reads the next batch of active messages without changing the state of the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
 * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage messages} that are peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if an error occurs while peeking at messages.
 */
Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, String sessionId) {
    if (isDisposed.get()) {
        return fluxError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch")));
    }
    if (maxMessages <= 0) {
        return fluxError(LOGGER, new IllegalArgumentException("'maxMessages' is not positive."));
    }

    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(node -> {
            // Continue peeking from just past the last sequence number seen by any previous peek on this
            // receiver instance.
            final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1;
            LOGGER.atVerbose().addKeyValue(SEQUENCE_NUMBER_KEY, nextSequenceNumber).log("Peek batch.");

            final Flux<ServiceBusReceivedMessage> messages = node.peek(nextSequenceNumber, sessionId,
                getLinkName(sessionId), maxMessages);

            // Side-channel that advances lastPeekedSequenceNumber monotonically. When the batch is empty,
            // a placeholder carrying the current value is substituted so that last() has an element.
            final Mono<ServiceBusReceivedMessage> handle = messages
                .switchIfEmpty(Mono.fromCallable(() -> {
                    ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(BinaryData
                        .fromBytes(new byte[0]));
                    emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get());
                    return emptyMessage;
                }))
                .last()
                .handle((last, sink) -> {
                    final long current = lastPeekedSequenceNumber
                        .updateAndGet(value -> Math.max(value,
                            last.getSequenceNumber()));
                    LOGGER.atVerbose().addKeyValue(SEQUENCE_NUMBER_KEY, current).log("Last peeked sequence number in batch.");
                    // Completes without emitting; only `messages` contributes elements to the merge below.
                    sink.complete();
                });

            return Flux.merge(messages, handle);
        })
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
}

/**
 * Starting from the given sequence number, reads the next batch of active messages without changing the state of
 * the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 * @param sequenceNumber The sequence number from where to start reading messages.
 * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if an error occurs while peeking at messages.
 */
public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, long sequenceNumber) {
    return peekMessages(maxMessages, sequenceNumber, receiverOptions.getSessionId());
}

/**
 * Starting from the given sequence number, reads the next batch of active messages without changing the state of
 * the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 * @param sequenceNumber The sequence number from where to start reading messages.
 * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
 * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage} peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if an error occurs while peeking at messages.
 * @see related Service Bus message browsing documentation.
 */
Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, long sequenceNumber, String sessionId) {
    if (isDisposed.get()) {
        return fluxError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt")));
    }
    if (maxMessages <= 0) {
        return fluxError(LOGGER, new IllegalArgumentException("'maxMessages' is not positive."));
    }

    // Unlike the overload without a sequence number, this variant does not touch
    // lastPeekedSequenceNumber: the caller fixed the starting point explicitly.
    return tracer.traceSyncReceive("ServiceBus.peekMessages", connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages))
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)));
}

/**
 * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity.
 * This Flux continuously receives messages from a Service Bus entity until either:
 *
 * <ul>
 * <li>The receiver is closed.</li>
 * <li>The subscription to the Flux is disposed.</li>
 * <li>A terminal signal from a downstream subscriber is propagated upstream.</li>
 * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li>
 * </ul>
 *
 * @return An <b>infinite</b> stream of messages from the Service Bus entity.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if an error occurs while receiving messages.
 */
public Flux<ServiceBusReceivedMessage> receiveMessages() {
    if (isDisposed.get()) {
        return fluxError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveMessages")));
    }
    // Request messages one at a time from upstream; prefetching is handled further up the pipeline.
    return receiveMessagesNoBackPressure().limitRate(1, 0);
}

// Unwraps each ServiceBusMessageContext into its message (or propagates its error), keeping the tracing
// span current while the downstream signal is delivered.
@SuppressWarnings("try")
Flux<ServiceBusReceivedMessage> receiveMessagesNoBackPressure() {
    return receiveMessagesWithContext(0)
        .handle((serviceBusMessageContext, sink) -> {
            try (AutoCloseable scope = tracer.makeSpanCurrent(serviceBusMessageContext.getMessage().getContext())) {
                if (serviceBusMessageContext.hasError()) {
                    sink.error(serviceBusMessageContext.getThrowable());
                    return;
                }
                sink.next(serviceBusMessageContext.getMessage());
            } catch (Exception ex) {
                LOGGER.verbose("Error disposing scope", ex);
            }
        });
}

/**
 * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity,
 * each wrapped in its {@link ServiceBusMessageContext}.
 *
 * @return An <b>infinite</b> stream of messages from the Service Bus entity.
 */
Flux<ServiceBusMessageContext> receiveMessagesWithContext() {
    return receiveMessagesWithContext(1);
}

// Assembles the receive pipeline: session or non-session source, tracing, optional auto lock renewal,
// optional auto-complete, and an optional rate limit ({@code highTide <= 0} disables the limit).
Flux<ServiceBusMessageContext> receiveMessagesWithContext(int highTide) {
    final Flux<ServiceBusMessageContext> messageFlux = sessionManager != null
        ? sessionManager.receive()
        : getOrCreateConsumer().receive().map(ServiceBusMessageContext::new);

    final Flux<ServiceBusMessageContext> messageFluxWithTracing = new FluxTrace(messageFlux, instrumentation);
    final Flux<ServiceBusMessageContext> withAutoLockRenewal;

    // Per-message lock renewal only applies to non-session receivers; session locks are renewed elsewhere.
    if (!receiverOptions.isSessionReceiver() && receiverOptions.isAutoLockRenewEnabled()) {
        withAutoLockRenewal = new FluxAutoLockRenew(messageFluxWithTracing, receiverOptions,
            renewalContainer, this::renewMessageLock);
    } else {
        withAutoLockRenewal = messageFluxWithTracing;
    }

    Flux<ServiceBusMessageContext> result;
    if (receiverOptions.isEnableAutoComplete()) {
        result = new FluxAutoComplete(withAutoLockRenewal, completionLock,
            context -> context.getMessage() != null ? complete(context.getMessage()) : Mono.empty(),
            context -> context.getMessage() != null ? abandon(context.getMessage()) : Mono.empty());
    } else {
        result = withAutoLockRenewal;
    }

    if (highTide > 0) {
        result = result.limitRate(highTide, 0);
    }

    return result
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
}

/**
 * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
 * sequence number.
 *
 * @param sequenceNumber The sequence number of the deferred message.
 * @return A deferred message with the matching {@code sequenceNumber}.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if deferred message cannot be received.
 */
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) {
    return receiveDeferredMessage(sequenceNumber, receiverOptions.getSessionId());
}

/**
 * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
 * sequence number.
 *
 * @param sequenceNumber The sequence number of the deferred message.
 * @param sessionId Session id of the deferred message. {@code null} if there is no session.
 * @return A deferred message with the matching {@code sequenceNumber}.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if deferred message cannot be received.
 */
Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessage")));
    }

    return tracer.traceManagementReceive("ServiceBus.receiveDeferredMessage", connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(),
            sessionId, getLinkName(sessionId), Collections.singleton(sequenceNumber)).last())
        .map(receivedMessage -> {
            // No lock token means there is nothing to track (e.g. RECEIVE_AND_DELETE).
            if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                return receivedMessage;
            }
            // Locks obtained via the management node are tracked so later settlement is routed there.
            if (receiverOptions.getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) {
                receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                    receivedMessage.getLockedUntil(),
                    receivedMessage.getLockedUntil()));
            }
            return receivedMessage;
        })
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)),
        ServiceBusReceivedMessage::getContext);
}

/**
 * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
 * by using sequence number.
 *
 * @param sequenceNumbers The sequence numbers of the deferred messages.
 * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}.
 * @throws NullPointerException if {@code sequenceNumbers} is null.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if deferred messages cannot be received.
 */
public Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers) {
    return tracer.traceSyncReceive("ServiceBus.receiveDeferredMessages",
        receiveDeferredMessages(sequenceNumbers, receiverOptions.getSessionId()));
}

/**
 * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
 * by using sequence number.
 *
 * @param sequenceNumbers The sequence numbers of the deferred messages.
 * @param sessionId Session id of the deferred messages. {@code null} if there is no session.
 * @return An {@link IterableStream} of deferred {@link ServiceBusReceivedMessage messages}.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws NullPointerException if {@code sequenceNumbers} is null.
 * @throws ServiceBusException if deferred message cannot be received.
 */
Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers, String sessionId) {
    if (isDisposed.get()) {
        return fluxError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch")));
    }
    if (sequenceNumbers == null) {
        return fluxError(LOGGER, new NullPointerException("'sequenceNumbers' cannot be null"));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId,
            getLinkName(sessionId), sequenceNumbers))
        .map(receivedMessage -> {
            // No lock token means there is nothing to track (e.g. RECEIVE_AND_DELETE).
            if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                return receivedMessage;
            }
            // Locks obtained via the management node are tracked so later settlement is routed there.
            if (receiverOptions.getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) {
                receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                    receivedMessage.getLockedUntil(),
                    receivedMessage.getLockedUntil()));
            }
            return receivedMessage;
        })
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
}

/**
 * Package-private method that releases a
 * message back to the entity so it becomes available to other receivers.
 *
 * @param message Message to release.
 * @return Mono that completes when message is successfully released.
 */
Mono<Void> release(ServiceBusReceivedMessage message) {
    return updateDisposition(message, DispositionStatus.RELEASED, null, null, null, null);
}

/**
 * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the
 * entity. When a message is received in PEEK_LOCK mode, the message is locked on the server for this receiver
 * instance for a duration as specified during the entity creation (LockDuration). If processing of the message
 * requires longer than this duration, the lock needs to be renewed. For each renewal, the lock is reset to the
 * entity's LockDuration value.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform auto-lock renewal.
 * @return The new expiration time for the message.
 * @throws NullPointerException if {@code message} or {@code message.getLockToken()} is null.
 * @throws UnsupportedOperationException if the receiver was opened in RECEIVE_AND_DELETE mode.
 * @throws IllegalStateException if the receiver is a session receiver or receiver is already disposed.
 * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value.
 */
public Mono<OffsetDateTime> renewMessageLock(ServiceBusReceivedMessage message) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock")));
    } else if (Objects.isNull(message)) {
        return monoError(LOGGER, new NullPointerException("'message' cannot be null."));
    } else if (Objects.isNull(message.getLockToken())) {
        return monoError(LOGGER, new NullPointerException("'message.getLockToken()' cannot be null."));
    } else if (message.getLockToken().isEmpty()) {
        return monoError(LOGGER, new IllegalArgumentException("'message.getLockToken()' cannot be empty."));
    } else if (receiverOptions.isSessionReceiver()) {
        final String errorMessage = "Renewing message lock is an invalid operation when working with sessions.";
        return monoError(LOGGER, new IllegalStateException(errorMessage));
    }

    return tracer.traceMonoWithLink("ServiceBus.renewMessageLock", renewMessageLock(message.getLockToken()),
        message, message.getContext())
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK));
}

/**
 * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the
 * entity.
 *
 * @param lockToken to be renewed.
 * @return The new expiration time for the message.
 * @throws IllegalStateException if receiver is already disposed.
 */
Mono<OffsetDateTime> renewMessageLock(String lockToken) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock")));
    }

    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(serviceBusManagementNode -> serviceBusManagementNode.renewMessageLock(lockToken,
            getLinkName(null)))
        // Record the new expiration so settlement of this token keeps routing through the management node.
        .map(offsetDateTime -> managementNodeLocks.addOrUpdate(lockToken, offsetDateTime, offsetDateTime));
}

/**
 * Starts the auto lock renewal for a {@link ServiceBusReceivedMessage message}.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 * @param maxLockRenewalDuration Maximum duration to keep renewing the lock token.
 * @return A Mono that completes when the message renewal operation has completed up until
 *     {@code maxLockRenewalDuration}.
 * @throws NullPointerException if {@code message}, {@code message.getLockToken()}, or
 *     {@code maxLockRenewalDuration} is null.
 * @throws IllegalStateException if the receiver is a session receiver or the receiver is disposed.
 * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value.
 * @throws ServiceBusException If the message lock cannot be renewed.
 */
public Mono<Void> renewMessageLock(ServiceBusReceivedMessage message, Duration maxLockRenewalDuration) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewMessageLock")));
    } else if (Objects.isNull(message)) {
        return monoError(LOGGER, new NullPointerException("'message' cannot be null."));
    } else if (Objects.isNull(message.getLockToken())) {
        return monoError(LOGGER, new NullPointerException("'message.getLockToken()' cannot be null."));
    } else if (message.getLockToken().isEmpty()) {
        return monoError(LOGGER, new IllegalArgumentException("'message.getLockToken()' cannot be empty."));
    } else if (receiverOptions.isSessionReceiver()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken())));
    } else if (maxLockRenewalDuration == null) {
        return monoError(LOGGER, new NullPointerException("'maxLockRenewalDuration' cannot be null."));
    } else if (maxLockRenewalDuration.isNegative()) {
        return monoError(LOGGER, new IllegalArgumentException("'maxLockRenewalDuration' cannot be negative."));
    }

    // The renewal operation repeatedly re-locks the message until maxLockRenewalDuration elapses;
    // it is registered in renewalContainer so it is cancelled if the message is settled or the client closes.
    final LockRenewalOperation operation = new LockRenewalOperation(message.getLockToken(),
        maxLockRenewalDuration, false, ignored -> renewMessageLock(message));

    renewalContainer.addOrUpdate(message.getLockToken(), OffsetDateTime.now().plus(maxLockRenewalDuration),
        operation);

    return tracer.traceMonoWithLink("ServiceBus.renewMessageLock", operation.getCompletionOperation(),
        message, message.getContext())
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK));
}

/**
 * Renews the session lock if this receiver is a session receiver.
 *
 * @return The next expiration time for the session lock.
 * @throws IllegalStateException if the receiver is a non-session receiver or if receiver is already disposed.
 * @throws ServiceBusException if the session lock cannot be renewed.
 */
public Mono<OffsetDateTime> renewSessionLock() {
    return renewSessionLock(receiverOptions.getSessionId());
}

/**
 * Starts the auto lock renewal for the session this receiver works for.
 *
 * @param maxLockRenewalDuration Maximum duration to keep renewing the session lock.
 * @return A lock renewal operation for the message.
 * @throws NullPointerException if {@code sessionId} or {@code maxLockRenewalDuration} is null.
 * @throws IllegalStateException if the receiver is a non-session receiver or the receiver is disposed.
 * @throws ServiceBusException if the session lock renewal operation cannot be started.
 * @throws IllegalArgumentException if {@code sessionId} is an empty string or {@code maxLockRenewalDuration} is
 *     negative.
 */
public Mono<Void> renewSessionLock(Duration maxLockRenewalDuration) {
    return this.renewSessionLock(receiverOptions.getSessionId(), maxLockRenewalDuration);
}

/**
 * Sets the state of the session this receiver works for.
 *
 * @param sessionState State to set on the session.
 * @return A Mono that completes when the session is set
 * @throws IllegalStateException if the receiver is a non-session receiver or receiver is already disposed.
 * @throws ServiceBusException if the session state cannot be set.
*/ public Mono<Void> setSessionState(byte[] sessionState) { return this.setSessionState(receiverOptions.getSessionId(), sessionState); } /** * Starts a new service side transaction. The {@link ServiceBusTransactionContext transaction context} should be * passed to all operations that needs to be in this transaction. * * <p><strong>Creating and using a transaction</strong></p> * <!-- src_embed com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * <pre> * & * & * & * Mono&lt;ServiceBusTransactionContext&gt; transactionContext = receiver.createTransaction& * .cache& * error -&gt; Duration.ZERO, * & * * transactionContext.flatMap& * & * Mono&lt;Void&gt; operations = Mono.when& * receiver.receiveDeferredMessage& * receiver.complete& * receiver.abandon& * * & * return operations.flatMap& * & * </pre> * <!-- end com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * * @return The {@link Mono} that finishes this operation on service bus resource. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if a transaction cannot be created. */ public Mono<ServiceBusTransactionContext> createTransaction() { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction"))); } return tracer.traceMono("ServiceBus.commitTransaction", connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.createTransaction()) .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId()))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Commits the transaction and all the operations associated with it. 
 *
 * @param transactionContext The transaction to commit.
 * @return The {@link Mono} that finishes this operation on the Service Bus resource.
 * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the transaction could not be committed.
 */
public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction")));
    }
    if (Objects.isNull(transactionContext)) {
        return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
    } else if (Objects.isNull(transactionContext.getTransactionId())) {
        return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }

    return tracer.traceMono("ServiceBus.commitTransaction", connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction(
            transactionContext.getTransactionId()))))
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
}

/**
 * Rollbacks the transaction given and all operations associated with it.
 *
 * @param transactionContext The transaction to rollback.
 * @return The {@link Mono} that finishes this operation on the Service Bus resource.
 * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the transaction could not be rolled back.
 */
public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction")));
    }
    if (Objects.isNull(transactionContext)) {
        return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
    } else if (Objects.isNull(transactionContext.getTransactionId())) {
        return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }

    return tracer.traceMono("ServiceBus.rollbackTransaction", connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction(
            transactionContext.getTransactionId()))))
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
}

/**
 * Disposes of the consumer by closing the underlying links to the service.
 */
@Override
public void close() {
    if (isDisposed.get()) {
        return;
    }

    try {
        // Wait briefly for any in-flight auto-complete settlement before tearing down links.
        boolean acquired = completionLock.tryAcquire(5, TimeUnit.SECONDS);
        if (!acquired) {
            LOGGER.info("Unable to obtain completion lock.");
        }
    } catch (InterruptedException e) {
        LOGGER.info("Unable to obtain completion lock.", e);
    }

    // getAndSet guards against concurrent close() calls; only the first proceeds with teardown.
    if (isDisposed.getAndSet(true)) {
        return;
    }

    LOGGER.info("Removing receiver links.");
    final ServiceBusAsyncConsumer disposed = consumer.getAndSet(null);
    if (disposed != null) {
        disposed.close();
    }

    if (sessionManager != null) {
        sessionManager.close();
    }

    managementNodeLocks.close();
    renewalContainer.close();

    if (trackSettlementSequenceNumber != null) {
        try {
            trackSettlementSequenceNumber.close();
        } catch (Exception e) {
            LOGGER.info("Unable to close settlement sequence number subscription.", e);
        }
    }

    onClientClose.run();
}

/**
 * @return receiver options set by user;
 */
ReceiverOptions getReceiverOptions() {
    return receiverOptions;
}

/**
 * Gets whether or not the management node contains the message lock token and it has not expired. Lock tokens are
 * held by the management node when they are received from the management node or management operations are
 * performed using that {@code lockToken}.
 *
 * @param lockToken Lock token to check for.
 * @return {@code true} if the management node contains the lock token and false otherwise.
*/ private boolean isManagementToken(String lockToken) { return managementNodeLocks.containsUnexpired(lockToken); } private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus, String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue()))); } else if (Objects.isNull(message)) { return monoError(LOGGER, new NullPointerException("'message' cannot be null.")); } final String lockToken = message.getLockToken(); final String sessionId = message.getSessionId(); if (receiverOptions.getReceiveMode() != ServiceBusReceiveMode.PEEK_LOCK) { return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format( "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus)))); } else if (message.isSettled()) { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("The message has either been deleted or already settled."))); } else if (message.getLockToken() == null) { final String errorMessage = "This operation is not supported for peeked messages. 
" + "Only messages received using receiveMessages() in PEEK_LOCK mode can be settled."; return Mono.error( LOGGER.logExceptionAsError(new UnsupportedOperationException(errorMessage)) ); } final String sessionIdToUse; if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) { sessionIdToUse = receiverOptions.getSessionId(); } else { sessionIdToUse = sessionId; } LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, lockToken) .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(SESSION_ID_KEY, sessionIdToUse) .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus) .log("Update started."); final Mono<Void> performOnManagement = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId), transactionContext)) .then(Mono.fromRunnable(() -> { LOGGER.atInfo() .addKeyValue(LOCK_TOKEN_KEY, lockToken) .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus) .log("Management node Update completed."); message.setIsSettled(); managementNodeLocks.remove(lockToken); renewalContainer.remove(lockToken); })); Mono<Void> updateDispositionOperation; if (sessionManager != null) { updateDispositionOperation = sessionManager.updateDisposition(lockToken, sessionId, dispositionStatus, propertiesToModify, deadLetterReason, deadLetterErrorDescription, transactionContext) .flatMap(isSuccess -> { if (isSuccess) { message.setIsSettled(); renewalContainer.remove(lockToken); return Mono.empty(); } LOGGER.info("Could not perform on session manger. 
Performing on management node."); return performOnManagement; }); } else { final ServiceBusAsyncConsumer existingConsumer = consumer.get(); if (isManagementToken(lockToken) || existingConsumer == null) { updateDispositionOperation = performOnManagement; } else { updateDispositionOperation = existingConsumer.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, transactionContext) .then(Mono.fromRunnable(() -> { LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, lockToken) .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus) .log("Update completed."); message.setIsSettled(); renewalContainer.remove(lockToken); })); } } return instrumentation.instrumentSettlement(updateDispositionOperation, message, message.getContext(), dispositionStatus) .onErrorMap(throwable -> { if (throwable instanceof ServiceBusException) { return throwable; } switch (dispositionStatus) { case COMPLETED: return new ServiceBusException(throwable, ServiceBusErrorSource.COMPLETE); case ABANDONED: return new ServiceBusException(throwable, ServiceBusErrorSource.ABANDON); default: return new ServiceBusException(throwable, ServiceBusErrorSource.UNKNOWN); } }); } /** * If the receiver has not connected via {@link * through the management node. * * @return The name of the receive link, or null of it has not connected via a receive link. */ private String getLinkName(String sessionId) { if (sessionManager != null && !CoreUtils.isNullOrEmpty(sessionId)) { return sessionManager.getLinkName(sessionId); } else if (!CoreUtils.isNullOrEmpty(sessionId) && !receiverOptions.isSessionReceiver()) { return null; } else { final ServiceBusAsyncConsumer existing = consumer.get(); return existing != null ? 
existing.getLinkName() : null; } } Mono<OffsetDateTime> renewSessionLock(String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot renew session lock on a non-session receiver.")); } final String linkName = sessionManager != null ? sessionManager.getLinkName(sessionId) : null; return tracer.traceMono("ServiceBus.renewSessionLock", connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.renewSessionLock(sessionId, linkName))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> renewSessionLock(String sessionId, Duration maxLockRenewalDuration) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException( "Cannot renew session lock on a non-session receiver.")); } else if (maxLockRenewalDuration == null) { return monoError(LOGGER, new NullPointerException("'maxLockRenewalDuration' cannot be null.")); } else if (maxLockRenewalDuration.isNegative()) { return monoError(LOGGER, new IllegalArgumentException( "'maxLockRenewalDuration' cannot be negative.")); } else if (Objects.isNull(sessionId)) { return monoError(LOGGER, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(LOGGER, new IllegalArgumentException("'sessionId' cannot be empty.")); } final LockRenewalOperation operation = new LockRenewalOperation(sessionId, maxLockRenewalDuration, true, this::renewSessionLock); renewalContainer.addOrUpdate(sessionId, OffsetDateTime.now().plus(maxLockRenewalDuration), operation); return 
tracer.traceMono("ServiceBus.renewSessionLock", operation.getCompletionOperation()) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> setSessionState(String sessionId, byte[] sessionState) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot set session state on a non-session receiver.")); } final String linkName = sessionManager != null ? sessionManager.getLinkName(sessionId) : null; return tracer.traceMono("ServiceBus.setSessionState", connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.setSessionState(sessionId, sessionState, linkName))) .onErrorMap((err) -> mapError(err, ServiceBusErrorSource.RECEIVE)); } Mono<byte[]> getSessionState(String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot get session state on a non-session receiver.")); } Mono<byte[]> result; if (sessionManager != null) { result = sessionManager.getSessionState(sessionId); } else { result = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.getSessionState(sessionId, getLinkName(sessionId))); } return tracer.traceMono("ServiceBus.setSessionState", result) .onErrorMap((err) -> mapError(err, ServiceBusErrorSource.RECEIVE)); } ServiceBusReceiverInstrumentation getInstrumentation() { return instrumentation; } /** * Map the error to {@link ServiceBusException} */ private Throwable mapError(Throwable throwable, ServiceBusErrorSource errorSource) { if (!(throwable instanceof 
ServiceBusException)) { return new ServiceBusException(throwable, errorSource); } return throwable; } boolean isConnectionClosed() { return this.connectionProcessor.isChannelClosed(); } boolean isManagementNodeLocksClosed() { return this.managementNodeLocks.isClosed(); } boolean isRenewalContainerClosed() { return this.renewalContainer.isClosed(); } }
I added it because I saw that AmqpReceiveLinkProcessor has that null check. But you are right — the Reactive Streams specification already forbids null onNext signals, so I'll remove the null check and update the documentation.
private ServiceBusAsyncConsumer getOrCreateConsumer() {
        final ServiceBusAsyncConsumer existing = consumer.get();
        if (existing != null) {
            return existing;
        }

        final String linkName = StringUtil.getRandomString(entityPath);
        LOGGER.atInfo()
            .addKeyValue(LINK_NAME_KEY, linkName)
            .addKeyValue(ENTITY_PATH_KEY, entityPath)
            .log("Creating consumer.");

        // Creates a receive link on the active connection; session receivers pass the session id so the
        // service binds the link to that session.
        final Mono<ServiceBusReceiveLink> receiveLinkMono = connectionProcessor.flatMap(connection -> {
            if (receiverOptions.isSessionReceiver()) {
                return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                    null, entityType, identifier, receiverOptions.getSessionId());
            } else {
                return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                    null, entityType, identifier);
            }
        }).doOnNext(next -> {
            LOGGER.atVerbose()
                .addKeyValue(LINK_NAME_KEY, linkName)
                .addKeyValue(ENTITY_PATH_KEY, next.getEntityPath())
                .addKeyValue("mode", receiverOptions.getReceiveMode())
                .addKeyValue("isSessionEnabled", receiverOptions.isSessionReceiver())
                .addKeyValue(ENTITY_TYPE_KEY, entityType)
                .log("Created consumer for Service Bus resource.");
        });

        // A closed management channel is transient; surface it as a retriable AmqpException so withRetry
        // re-attempts link creation.
        final Mono<ServiceBusReceiveLink> retryableReceiveLinkMono = RetryUtil.withRetry(receiveLinkMono.onErrorMap(
            RequestResponseChannelClosedException.class,
            e -> {
                return new AmqpException(true, e.getMessage(), e, null);
            }), connectionProcessor.getRetryOptions(), "Failed to create receive link " + linkName, true);

        // repeat() re-subscribes to obtain a fresh link whenever the previous one completes.
        // FIX: dropped the redundant "link != null" filter — a Reactive Streams publisher may never emit
        // null, so only the disposed check is needed.
        final Flux<ServiceBusReceiveLink> receiveLinkFlux = retryableReceiveLinkMono
            .repeat()
            .filter(link -> !link.isDisposed());

        final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions());
        final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLinkFlux.subscribeWith(
            new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy));
        final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor,
            messageSerializer, receiverOptions);

        // CAS guards against a concurrent creation racing us; the loser closes its consumer and reuses
        // the winner's.
        if (consumer.compareAndSet(null, newConsumer)) {
            return newConsumer;
        } else {
            newConsumer.close();
            return consumer.get();
        }
    }
// Lazily creates the AMQP consumer backing receiveMessages(); safe for concurrent callers via CAS below.
private ServiceBusAsyncConsumer getOrCreateConsumer() {
        final ServiceBusAsyncConsumer existing = consumer.get();
        if (existing != null) {
            return existing;
        }

        final String linkName = StringUtil.getRandomString(entityPath);
        LOGGER.atInfo()
            .addKeyValue(LINK_NAME_KEY, linkName)
            .addKeyValue(ENTITY_PATH_KEY, entityPath)
            .log("Creating consumer.");

        // Creates a receive link on the active connection; session receivers also pass the session id.
        final Mono<ServiceBusReceiveLink> receiveLinkMono = connectionProcessor.flatMap(connection -> {
            if (receiverOptions.isSessionReceiver()) {
                return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                    null, entityType, identifier, receiverOptions.getSessionId());
            } else {
                return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                    null, entityType, identifier);
            }
        }).doOnNext(next -> {
            LOGGER.atVerbose()
                .addKeyValue(LINK_NAME_KEY, linkName)
                .addKeyValue(ENTITY_PATH_KEY, next.getEntityPath())
                .addKeyValue("mode", receiverOptions.getReceiveMode())
                .addKeyValue("isSessionEnabled", receiverOptions.isSessionReceiver())
                .addKeyValue(ENTITY_TYPE_KEY, entityType)
                .log("Created consumer for Service Bus resource.");
        });

        // A closed management channel is transient; map it to a retriable AmqpException so withRetry
        // re-attempts link creation.
        final Mono<ServiceBusReceiveLink> retryableReceiveLinkMono = RetryUtil.withRetry(receiveLinkMono.onErrorMap(
            RequestResponseChannelClosedException.class,
            e -> {
                return new AmqpException(true, e.getMessage(), e, null);
            }), connectionProcessor.getRetryOptions(), "Failed to create receive link " + linkName, true);

        // repeat() re-subscribes for a fresh link whenever the previous one completes; only non-disposed
        // links are forwarded (a publisher can never emit null, so no null check is needed).
        final Flux<ServiceBusReceiveLink> receiveLinkFlux = retryableReceiveLinkMono
            .repeat()
            .filter(link -> !link.isDisposed());

        final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions());
        final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLinkFlux.subscribeWith(
            new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy));
        final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor,
            messageSerializer, receiverOptions);

        // CAS guards against a concurrent creation racing us; the loser closes its consumer and reuses
        // the winner's.
        if (consumer.compareAndSet(null, newConsumer)) {
            return newConsumer;
        } else {
            newConsumer.close();
            return consumer.get();
        }
    }
class ServiceBusReceiverAsyncClient implements AutoCloseable {
    private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions();
    private static final String TRANSACTION_LINK_NAME = "coordinator";
    private static final ClientLogger LOGGER = new ClientLogger(ServiceBusReceiverAsyncClient.class);

    // Tracks in-flight lock-renewal operations so expired ones are closed by the container's cleanup.
    private final LockContainer<LockRenewalOperation> renewalContainer;
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    // Lock tokens obtained via the management node (e.g. deferred messages) mapped to their lockedUntil times.
    private final LockContainer<OffsetDateTime> managementNodeLocks;
    private final String fullyQualifiedNamespace;
    private final String entityPath;
    private final MessagingEntityType entityType;
    private final ReceiverOptions receiverOptions;
    private final ServiceBusConnectionProcessor connectionProcessor;
    private final ServiceBusReceiverInstrumentation instrumentation;
    private final ServiceBusTracer tracer;
    private final MessageSerializer messageSerializer;
    private final Runnable onClientClose;
    // Null for non-session receivers (first constructor); non-null for session receivers (second constructor).
    private final ServiceBusSessionManager sessionManager;
    // Serializes auto-complete/abandon settlement of received messages.
    private final Semaphore completionLock = new Semaphore(1);
    private final String identifier;
    // Highest sequence number observed by peek operations; -1 until the first peek.
    private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1);
    // Lazily-created consumer backing receiveMessages(); see getOrCreateConsumer().
    private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>();
    private final AutoCloseable trackSettlementSequenceNumber;

    /**
     * Creates a receiver that listens to a Service Bus resource.
     *
     * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource.
     * @param entityPath The name of the topic or queue.
     * @param entityType The type of the Service Bus resource.
     * @param receiverOptions Options when receiving messages.
     * @param connectionProcessor The AMQP connection to the Service Bus resource.
     * @param cleanupInterval Interval after which idle entries in the lock containers are cleaned up.
     * @param instrumentation ServiceBus tracing and metrics helper.
     * @param messageSerializer Serializes and deserializes Service Bus messages.
     * @param onClientClose Operation to run when the client completes.
     * @param identifier Identifier of this client instance.
     */
    ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
        ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
        ServiceBusReceiverInstrumentation instrumentation, MessageSerializer messageSerializer,
        Runnable onClientClose, String identifier) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
        this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
        this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
        this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
            "'connectionProcessor' cannot be null.");
        this.instrumentation = Objects.requireNonNull(instrumentation, "'tracer' cannot be null");
        this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
        this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");

        this.managementNodeLocks = new LockContainer<>(cleanupInterval);
        this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> {
            LOGGER.atVerbose()
                .addKeyValue(LOCK_TOKEN_KEY, renewal.getLockToken())
                .addKeyValue("status", renewal.getStatus())
                .log("Closing expired renewal operation.", renewal.getThrowable());
            renewal.close();
        });

        // Non-session receiver: no session manager.
        this.sessionManager = null;
        this.identifier = identifier;
        this.tracer = instrumentation.getTracer();
        this.trackSettlementSequenceNumber = instrumentation.startTrackingSettlementSequenceNumber();
    }

    /**
     * Creates a session-aware receiver. Same as the constructor above except the client identifier and
     * session link names are sourced from the given {@code sessionManager}.
     */
    ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
        ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
        ServiceBusReceiverInstrumentation instrumentation, MessageSerializer messageSerializer,
        Runnable onClientClose, ServiceBusSessionManager sessionManager) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
        this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
        this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
        this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
            "'connectionProcessor' cannot be null.");
        this.instrumentation = Objects.requireNonNull(instrumentation, "'tracer' cannot be null");
        this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
        this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
        this.sessionManager = Objects.requireNonNull(sessionManager, "'sessionManager' cannot be null.");

        this.managementNodeLocks = new LockContainer<>(cleanupInterval);
        this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> {
            LOGGER.atInfo()
                .addKeyValue(SESSION_ID_KEY, renewal.getSessionId())
                .addKeyValue("status", renewal.getStatus())
                .log("Closing expired renewal operation.", renewal.getThrowable());
            renewal.close();
        });

        this.identifier = sessionManager.getIdentifier();
        this.tracer = instrumentation.getTracer();
        this.trackSettlementSequenceNumber = instrumentation.startTrackingSettlementSequenceNumber();
    }

    /**
     * Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to
     * {@code {yournamespace}.servicebus.windows.net}.
     *
     * @return The fully qualified Service Bus namespace that the connection is associated with.
     */
    public String getFullyQualifiedNamespace() {
        return fullyQualifiedNamespace;
    }

    /**
     * Gets the Service Bus resource this client interacts with.
*
     * @return The Service Bus resource this client interacts with.
     */
    public String getEntityPath() {
        return entityPath;
    }

    /**
     * Gets the SessionId of the session if this receiver is a session receiver.
     *
     * @return The SessionId or null if this is not a session receiver.
     */
    public String getSessionId() {
        return receiverOptions.getSessionId();
    }

    /**
     * Gets the identifier of the instance of {@link ServiceBusReceiverAsyncClient}.
     *
     * @return The identifier that can identify the instance of {@link ServiceBusReceiverAsyncClient}.
     */
    public String getIdentifier() {
        return identifier;
    }

    /**
     * Abandons a {@link ServiceBusReceivedMessage message}. This will make the message available again for processing.
     * Abandoning a message will increase the delivery count on the message.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     *
     * @return A {@link Mono} that completes when the Service Bus abandon operation completes.
     *
     * @throws NullPointerException if {@code message} is null.
     * @throws UnsupportedOperationException if the receiver was opened in
     *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode or if the message was received from
     *     {@link ServiceBusReceiverAsyncClient#peekMessage() peekMessage}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be abandoned.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
     */
    public Mono<Void> abandon(ServiceBusReceivedMessage message) {
        return updateDisposition(message, DispositionStatus.ABANDONED, null, null, null, null);
    }

    /**
     * Abandons a {@link ServiceBusReceivedMessage message} updates the message's properties. This will make the
     * message available again for processing. Abandoning a message will increase the delivery count on the message.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param options The options to set while abandoning the message.
     *
     * @return A {@link Mono} that completes when the Service Bus operation finishes.
     *
     * @throws NullPointerException if {@code message} or {@code options} is null. Also if
     *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
     * @throws UnsupportedOperationException if the receiver was opened in
     *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode or if the message was received from
     *     {@link ServiceBusReceiverAsyncClient#peekMessage() peekMessage}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be abandoned.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
     */
    public Mono<Void> abandon(ServiceBusReceivedMessage message, AbandonOptions options) {
        if (Objects.isNull(options)) {
            return monoError(LOGGER, new NullPointerException("'settlementOptions' cannot be null."));
        } else if (!Objects.isNull(options.getTransactionContext())
            && Objects.isNull(options.getTransactionContext().getTransactionId())) {
            return monoError(LOGGER, new NullPointerException(
                "'options.transactionContext.transactionId' cannot be null."));
        }
        return updateDisposition(message, DispositionStatus.ABANDONED, null, null,
            options.getPropertiesToModify(), options.getTransactionContext());
    }

    /**
     * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the service.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     *
     * @return A {@link Mono} that finishes when the message is completed on Service Bus.
     *
     * @throws NullPointerException if {@code message} is null.
     * @throws UnsupportedOperationException if the receiver was opened in
     *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode or if the message was received from
     *     {@link ServiceBusReceiverAsyncClient#peekMessage() peekMessage}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be completed.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
     */
    public Mono<Void> complete(ServiceBusReceivedMessage message) {
        return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, null);
    }

    /**
     * Completes a {@link ServiceBusReceivedMessage message} with the given options. This will delete the message from
     * the service.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param options Options used to complete the message.
     *
     * @return A {@link Mono} that finishes when the message is completed on Service Bus.
     *
     * @throws NullPointerException if {@code message} or {@code options} is null. Also if
     *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
     * @throws UnsupportedOperationException if the receiver was opened in
     *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode or if the message was received from
     *     {@link ServiceBusReceiverAsyncClient#peekMessage() peekMessage}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be completed.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
     */
    public Mono<Void> complete(ServiceBusReceivedMessage message, CompleteOptions options) {
        if (Objects.isNull(options)) {
            return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
        } else if (!Objects.isNull(options.getTransactionContext())
            && Objects.isNull(options.getTransactionContext().getTransactionId())) {
            return monoError(LOGGER, new NullPointerException(
                "'options.transactionContext.transactionId' cannot be null."));
        }
        return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null,
            options.getTransactionContext());
    }

    /**
     * Defers a {@link ServiceBusReceivedMessage message}. This will move message into the deferred sub-queue.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     *
     * @return A {@link Mono} that completes when the Service Bus defer operation finishes.
     *
     * @throws NullPointerException if {@code message} is null.
     * @throws UnsupportedOperationException if the receiver was opened in
     *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode or if the message was received from
     *     {@link ServiceBusReceiverAsyncClient#peekMessage() peekMessage}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be deferred.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-deferral">Message deferral</a>
     */
    public Mono<Void> defer(ServiceBusReceivedMessage message) {
        return updateDisposition(message, DispositionStatus.DEFERRED, null, null, null, null);
    }

    /**
     * Defers a {@link ServiceBusReceivedMessage message} with the options set. This will move message into
     * the deferred sub-queue.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param options Options used to defer the message.
     *
     * @return A {@link Mono} that completes when the defer operation finishes.
     *
     * @throws NullPointerException if {@code message} or {@code options} is null. Also if
     *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
     * @throws UnsupportedOperationException if the receiver was opened in
     *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode or if the message was received from
     *     {@link ServiceBusReceiverAsyncClient#peekMessage() peekMessage}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be deferred.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-deferral">Message deferral</a>
     */
    public Mono<Void> defer(ServiceBusReceivedMessage message, DeferOptions options) {
        if (Objects.isNull(options)) {
            return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
        } else if (!Objects.isNull(options.getTransactionContext())
            && Objects.isNull(options.getTransactionContext().getTransactionId())) {
            return monoError(LOGGER, new NullPointerException(
                "'options.transactionContext.transactionId' cannot be null."));
        }
        return updateDisposition(message, DispositionStatus.DEFERRED, null, null,
            options.getPropertiesToModify(), options.getTransactionContext());
    }

    /**
     * Moves a {@link ServiceBusReceivedMessage message} to the dead-letter sub-queue.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     *
     * @return A {@link Mono} that completes when the dead letter operation finishes.
     *
     * @throws NullPointerException if {@code message} is null.
     * @throws UnsupportedOperationException if the receiver was opened in
     *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode or if the message was received from
     *     {@link ServiceBusReceiverAsyncClient#peekMessage() peekMessage}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be dead-lettered.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
     *
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-dead-letter-queues">Dead-letter
     *     queues</a>
     */
    public Mono<Void> deadLetter(ServiceBusReceivedMessage message) {
        return deadLetter(message, DEFAULT_DEAD_LETTER_OPTIONS);
    }

    /**
     * Moves a {@link ServiceBusReceivedMessage message} to the dead-letter sub-queue with the given options.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param options Options used to dead-letter the message.
     *
     * @return A {@link Mono} that completes when the dead letter operation finishes.
     *
     * @throws NullPointerException if {@code message} or {@code options} is null. Also if
     *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
     * @throws UnsupportedOperationException if the receiver was opened in
     *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode or if the message was received from
     *     {@link ServiceBusReceiverAsyncClient#peekMessage() peekMessage}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be dead-lettered.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
     *
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-dead-letter-queues">Dead-letter
     *     queues</a>
     */
    public Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions options) {
        if (Objects.isNull(options)) {
            return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
        } else if (!Objects.isNull(options.getTransactionContext())
            && Objects.isNull(options.getTransactionContext().getTransactionId())) {
            return monoError(LOGGER, new NullPointerException(
                "'options.transactionContext.transactionId' cannot be null."));
        }
        // SUSPENDED is the AMQP disposition used for dead-lettering.
        return updateDisposition(message, DispositionStatus.SUSPENDED, options.getDeadLetterReason(),
            options.getDeadLetterErrorDescription(), options.getPropertiesToModify(),
            options.getTransactionContext());
    }

    /**
     * Gets the state of the session if this receiver is a session receiver.
     *
     * @return The session state or an empty Mono if there is no state set for the session.
     * @throws IllegalStateException if the receiver is a non-session receiver or receiver is already closed.
     * @throws ServiceBusException if the session state could not be acquired.
     */
    public Mono<byte[]> getSessionState() {
        return getSessionState(receiverOptions.getSessionId());
    }

    /**
     * Reads the next active message without changing the state of the receiver or the message source. The first call to
     * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent
     * message in the entity.
     *
     * @return A peeked {@link ServiceBusReceivedMessage}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at the message.
* @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Mono<ServiceBusReceivedMessage> peekMessage() {
        return peekMessage(receiverOptions.getSessionId());
    }

    /**
     * Reads the next active message without changing the state of the receiver or the message source. The first call to
     * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent
     * message in the entity.
     *
     * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
     *
     * @return A peeked {@link ServiceBusReceivedMessage}.
     * @throws IllegalStateException if the receiver is disposed.
     * @throws ServiceBusException if an error occurs while peeking at the message.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    Mono<ServiceBusReceivedMessage> peekMessage(String sessionId) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek")));
        }

        Mono<ServiceBusReceivedMessage> result = connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(channel -> {
                // Peek starts one past the highest sequence number seen so far.
                final long sequence = lastPeekedSequenceNumber.get() + 1;
                LOGGER.atVerbose()
                    .addKeyValue(SEQUENCE_NUMBER_KEY, sequence)
                    .log("Peek message.");
                return channel.peek(sequence, sessionId, getLinkName(sessionId));
            })
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE))
            .handle((message, sink) -> {
                // Record the highest sequence number so the next peek continues from there.
                final long current = lastPeekedSequenceNumber
                    .updateAndGet(value -> Math.max(value, message.getSequenceNumber()));
                LOGGER.atVerbose()
                    .addKeyValue(SEQUENCE_NUMBER_KEY, current)
                    .log("Updating last peeked sequence number.");
                sink.next(message);
            });
        return tracer.traceManagementReceive("ServiceBus.peekMessage", result,
            ServiceBusReceivedMessage::getContext);
    }

    /**
     * Starting from the given sequence number, reads next the active message without changing the state of the receiver
     * or the message source.
     *
     * @param sequenceNumber The sequence number from where to read the message.
     *
     * @return A peeked {@link ServiceBusReceivedMessage}.
     *
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at the message.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Mono<ServiceBusReceivedMessage> peekMessage(long sequenceNumber) {
        return peekMessage(sequenceNumber, receiverOptions.getSessionId());
    }

    /**
     * Starting from the given sequence number, reads next the active message without changing the state of the receiver
     * or the message source.
     *
     * @param sequenceNumber The sequence number from where to read the message.
     * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
     *
     * @return A peeked {@link ServiceBusReceivedMessage}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at the message.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    Mono<ServiceBusReceivedMessage> peekMessage(long sequenceNumber, String sessionId) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt")));
        }
        // Note: unlike peekMessage(String), explicit-sequence peeks do not advance lastPeekedSequenceNumber.
        return tracer.traceManagementReceive("ServiceBus.peekMessage", connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId)))
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)),
            ServiceBusReceivedMessage::getContext);
    }

    /**
     * Reads the next batch of active messages without changing the state of the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     *
     * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked.
     *
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at messages.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages) {
        return tracer.traceSyncReceive("ServiceBus.peekMessages",
            peekMessages(maxMessages, receiverOptions.getSessionId()));
    }

    /**
     * Reads the next batch of active messages without changing the state of the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
     *
     * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage messages} that are peeked.
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at messages.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, String sessionId) {
        if (isDisposed.get()) {
            return fluxError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch")));
        }
        if (maxMessages <= 0) {
            return fluxError(LOGGER, new IllegalArgumentException("'maxMessages' is not positive."));
        }

        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMapMany(node -> {
                final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1;
                LOGGER.atVerbose().addKeyValue(SEQUENCE_NUMBER_KEY, nextSequenceNumber).log("Peek batch.");

                final Flux<ServiceBusReceivedMessage> messages = node.peek(nextSequenceNumber, sessionId,
                    getLinkName(sessionId), maxMessages);

                // Side-channel subscriber: after the batch completes, record the highest sequence number
                // seen. switchIfEmpty supplies a placeholder so last() has something to process when the
                // batch is empty; the placeholder itself is never emitted downstream (sink.complete()).
                final Mono<ServiceBusReceivedMessage> handle = messages
                    .switchIfEmpty(Mono.fromCallable(() -> {
                        ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(BinaryData
                            .fromBytes(new byte[0]));
                        emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get());
                        return emptyMessage;
                    }))
                    .last()
                    .handle((last, sink) -> {
                        final long current = lastPeekedSequenceNumber
                            .updateAndGet(value -> Math.max(value, last.getSequenceNumber()));
                        LOGGER.atVerbose().addKeyValue(SEQUENCE_NUMBER_KEY, current).log("Last peeked sequence number in batch.");
                        sink.complete();
                    });

                return Flux.merge(messages, handle);
            })
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
    }

    /**
     * Starting from the given sequence number, reads the next batch of active messages without changing the state of
     * the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     * @param sequenceNumber The sequence number from where to start reading messages.
     *
     * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} peeked.
     *
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at messages.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, long sequenceNumber) {
        return peekMessages(maxMessages, sequenceNumber, receiverOptions.getSessionId());
    }

    /**
     * Starting from the given sequence number, reads the next batch of active messages without changing the state of
     * the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     * @param sequenceNumber The sequence number from where to start reading messages.
     * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
     *
     * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage} peeked.
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at messages.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, long sequenceNumber, String sessionId) {
        if (isDisposed.get()) {
            return fluxError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt")));
        }
        if (maxMessages <= 0) {
            return fluxError(LOGGER, new IllegalArgumentException("'maxMessages' is not positive."));
        }
        return tracer.traceSyncReceive("ServiceBus.peekMessages", connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMapMany(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages))
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)));
    }

    /**
     * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity.
     * This Flux continuously receives messages from a Service Bus entity until either:
     *
     * <ul>
     * <li>The receiver is closed.</li>
     * <li>The subscription to the Flux is disposed.</li>
     * <li>A terminal signal from a downstream subscriber is propagated upstream (ie. {@link Flux#take(long)} or
     * {@link Flux#take(Duration)}).</li>
     * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li>
     * </ul>
     *
     * @return An <b>infinite</b> stream of messages from the Service Bus entity.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while receiving messages.
 */
public Flux<ServiceBusReceivedMessage> receiveMessages() {
    if (isDisposed.get()) {
        return fluxError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveMessages")));
    }
    // limitRate(1, 0): request one message at a time from the non-backpressured core stream,
    // re-requesting only after the previous request is fully satisfied.
    return receiveMessagesNoBackPressure().limitRate(1, 0);
}

// Core receive stream without any prefetch/backpressure shaping. Unwraps each
// ServiceBusMessageContext into a message (or propagates its error), making the
// message's tracing span current while doing so.
@SuppressWarnings("try")
Flux<ServiceBusReceivedMessage> receiveMessagesNoBackPressure() {
    return receiveMessagesWithContext(0)
        .handle((serviceBusMessageContext, sink) -> {
            try (AutoCloseable scope = tracer.makeSpanCurrent(serviceBusMessageContext.getMessage().getContext())) {
                if (serviceBusMessageContext.hasError()) {
                    sink.error(serviceBusMessageContext.getThrowable());
                    return;
                }
                sink.next(serviceBusMessageContext.getMessage());
            } catch (Exception ex) {
                // Failure to close the tracing scope must not disturb the message stream.
                LOGGER.verbose("Error disposing scope", ex);
            }
        });
}

/**
 * Receives an <b>infinite</b> stream of message contexts from the Service Bus entity. The stream ends when the
 * receiver is closed, the subscription is disposed, a terminal signal propagates upstream, or an
 * {@link AmqpException} stops the receive link.
 *
 * @return An <b>infinite</b> stream of message contexts from the Service Bus entity.
 */
Flux<ServiceBusMessageContext> receiveMessagesWithContext() {
    return receiveMessagesWithContext(1);
}

// Builds the receive pipeline. Decoration order matters:
// raw link/session flux -> tracing -> optional auto lock renewal -> optional auto-complete
// -> optional rate limiting (highTide > 0) -> error mapping.
Flux<ServiceBusMessageContext> receiveMessagesWithContext(int highTide) {
    // Session receivers pull from the session manager; otherwise use (and lazily create) the link consumer.
    final Flux<ServiceBusMessageContext> messageFlux = sessionManager != null
        ? sessionManager.receive()
        : getOrCreateConsumer().receive().map(ServiceBusMessageContext::new);

    final Flux<ServiceBusMessageContext> messageFluxWithTracing = new FluxTrace(messageFlux, instrumentation);
    final Flux<ServiceBusMessageContext> withAutoLockRenewal;

    // Auto lock renewal applies only to non-session receivers; session locks are renewed elsewhere.
    if (!receiverOptions.isSessionReceiver() && receiverOptions.isAutoLockRenewEnabled()) {
        withAutoLockRenewal = new FluxAutoLockRenew(messageFluxWithTracing, receiverOptions,
            renewalContainer, this::renewMessageLock);
    } else {
        withAutoLockRenewal = messageFluxWithTracing;
    }

    Flux<ServiceBusMessageContext> result;
    if (receiverOptions.isEnableAutoComplete()) {
        // Auto-complete settles each message after downstream processing: complete on success, abandon on error.
        result = new FluxAutoComplete(withAutoLockRenewal, completionLock,
            context -> context.getMessage() != null ? complete(context.getMessage()) : Mono.empty(),
            context -> context.getMessage() != null ? abandon(context.getMessage()) : Mono.empty());
    } else {
        result = withAutoLockRenewal;
    }

    // highTide == 0 means "no rate limiting" (used by receiveMessagesNoBackPressure).
    if (highTide > 0) {
        result = result.limitRate(highTide, 0);
    }

    return result
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
}

/**
 * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
 * sequence number.
 *
 * @param sequenceNumber The sequence number of the deferred message.
 *
 * @return A deferred message with the matching {@code sequenceNumber}.
 *
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if deferred message cannot be received.
 */
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) {
    return receiveDeferredMessage(sequenceNumber, receiverOptions.getSessionId());
}

/**
 * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
 * sequence number.
 *
 * @param sequenceNumber The sequence number of the deferred message.
 * @param sessionId Session id of the deferred message. {@code null} if there is no session.
* * @return A deferred message with the matching {@code sequenceNumber}. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if deferred message cannot be received. */ Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessage"))); } return tracer.traceManagementReceive("ServiceBus.receiveDeferredMessage", connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId, getLinkName(sessionId), Collections.singleton(sequenceNumber)).last()) .map(receivedMessage -> { if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) { return receivedMessage; } if (receiverOptions.getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) { receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(), receivedMessage.getLockedUntil(), receivedMessage.getLockedUntil())); } return receivedMessage; }) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)), ServiceBusReceivedMessage::getContext); } /** * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received * by using sequence number. * * @param sequenceNumbers The sequence numbers of the deferred messages. * * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}. * * @throws NullPointerException if {@code sequenceNumbers} is null. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if deferred messages cannot be received. 
*/ public Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers) { return tracer.traceSyncReceive("ServiceBus.receiveDeferredMessages", receiveDeferredMessages(sequenceNumbers, receiverOptions.getSessionId())); } /** * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received * by using sequence number. * * @param sequenceNumbers The sequence numbers of the deferred messages. * @param sessionId Session id of the deferred messages. {@code null} if there is no session. * * @return An {@link IterableStream} of deferred {@link ServiceBusReceivedMessage messages}. * @throws IllegalStateException if receiver is already disposed. * @throws NullPointerException if {@code sequenceNumbers} is null. * @throws ServiceBusException if deferred message cannot be received. */ Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers, String sessionId) { if (isDisposed.get()) { return fluxError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch"))); } if (sequenceNumbers == null) { return fluxError(LOGGER, new NullPointerException("'sequenceNumbers' cannot be null")); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId, getLinkName(sessionId), sequenceNumbers)) .map(receivedMessage -> { if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) { return receivedMessage; } if (receiverOptions.getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) { receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(), receivedMessage.getLockedUntil(), receivedMessage.getLockedUntil())); } return receivedMessage; }) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Package-private method that releases a 
message.
 *
 * @param message Message to release.
 * @return Mono that completes when message is successfully released.
 */
Mono<Void> release(ServiceBusReceivedMessage message) {
    // RELEASED disposition: gives the lock back without dead-lettering or completing.
    return updateDisposition(message, DispositionStatus.RELEASED, null, null, null, null);
}

/**
 * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the
 * entity. When a message is received in PEEK_LOCK mode, the message is locked on the
 * server for this receiver instance for a duration as specified during the entity creation (LockDuration). If
 * processing of the message requires longer than this duration, the lock needs to be renewed. For each renewal, the
 * lock is reset to the entity's LockDuration value.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform auto-lock renewal.
 *
 * @return The new expiration time for the message.
 *
 * @throws NullPointerException if {@code message} or {@code message.getLockToken()} is null.
 * @throws UnsupportedOperationException if the receiver was opened in RECEIVE_AND_DELETE mode.
 * @throws IllegalStateException if the receiver is a session receiver or receiver is already disposed.
 * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value.
*/ public Mono<OffsetDateTime> renewMessageLock(ServiceBusReceivedMessage message) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock"))); } else if (Objects.isNull(message)) { return monoError(LOGGER, new NullPointerException("'message' cannot be null.")); } else if (Objects.isNull(message.getLockToken())) { return monoError(LOGGER, new NullPointerException("'message.getLockToken()' cannot be null.")); } else if (message.getLockToken().isEmpty()) { return monoError(LOGGER, new IllegalArgumentException("'message.getLockToken()' cannot be empty.")); } else if (receiverOptions.isSessionReceiver()) { final String errorMessage = "Renewing message lock is an invalid operation when working with sessions."; return monoError(LOGGER, new IllegalStateException(errorMessage)); } return tracer.traceMonoWithLink("ServiceBus.renewMessageLock", renewMessageLock(message.getLockToken()), message, message.getContext()) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } /** * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the * entity. * * @param lockToken to be renewed. * * @return The new expiration time for the message. * @throws IllegalStateException if receiver is already disposed. */ Mono<OffsetDateTime> renewMessageLock(String lockToken) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(serviceBusManagementNode -> serviceBusManagementNode.renewMessageLock(lockToken, getLinkName(null))) .map(offsetDateTime -> managementNodeLocks.addOrUpdate(lockToken, offsetDateTime, offsetDateTime)); } /** * Starts the auto lock renewal for a {@link ServiceBusReceivedMessage message}. 
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 * @param maxLockRenewalDuration Maximum duration to keep renewing the lock token.
 *
 * @return A Mono that completes when the message renewal operation has completed up until
 *     {@code maxLockRenewalDuration}.
 *
 * @throws NullPointerException if {@code message}, {@code message.getLockToken()}, or
 *     {@code maxLockRenewalDuration} is null.
 * @throws IllegalStateException if the receiver is a session receiver or the receiver is disposed.
 * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value.
 * @throws ServiceBusException If the message lock cannot be renewed.
 */
public Mono<Void> renewMessageLock(ServiceBusReceivedMessage message, Duration maxLockRenewalDuration) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewMessageLock")));
    } else if (Objects.isNull(message)) {
        return monoError(LOGGER, new NullPointerException("'message' cannot be null."));
    } else if (Objects.isNull(message.getLockToken())) {
        return monoError(LOGGER, new NullPointerException("'message.getLockToken()' cannot be null."));
    } else if (message.getLockToken().isEmpty()) {
        return monoError(LOGGER, new IllegalArgumentException("'message.getLockToken()' cannot be empty."));
    } else if (receiverOptions.isSessionReceiver()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken())));
    } else if (maxLockRenewalDuration == null) {
        return monoError(LOGGER, new NullPointerException("'maxLockRenewalDuration' cannot be null."));
    } else if (maxLockRenewalDuration.isNegative()) {
        return monoError(LOGGER, new IllegalArgumentException("'maxLockRenewalDuration' cannot be negative."));
    }

    // The renewal operation repeatedly calls renewMessageLock(message) until maxLockRenewalDuration
    // elapses. It is registered in renewalContainer so it is cancelled when the message is settled.
    final LockRenewalOperation operation = new LockRenewalOperation(message.getLockToken(),
        maxLockRenewalDuration, false, ignored -> renewMessageLock(message));
    renewalContainer.addOrUpdate(message.getLockToken(), OffsetDateTime.now().plus(maxLockRenewalDuration),
        operation);

    return tracer.traceMonoWithLink("ServiceBus.renewMessageLock", operation.getCompletionOperation(),
        message, message.getContext())
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK));
}

/**
 * Renews the session lock if this receiver is a session receiver.
 *
 * @return The next expiration time for the session lock.
 * @throws IllegalStateException if the receiver is a non-session receiver or if receiver is already disposed.
 * @throws ServiceBusException if the session lock cannot be renewed.
 */
public Mono<OffsetDateTime> renewSessionLock() {
    // Delegates to the sessionId-taking overload using the receiver's configured session.
    return renewSessionLock(receiverOptions.getSessionId());
}

/**
 * Starts the auto lock renewal for the session this receiver works for.
 *
 * @param maxLockRenewalDuration Maximum duration to keep renewing the session lock.
 *
 * @return A lock renewal operation for the message.
 *
 * @throws NullPointerException if {@code sessionId} or {@code maxLockRenewalDuration} is null.
 * @throws IllegalStateException if the receiver is a non-session receiver or the receiver is disposed.
 * @throws ServiceBusException if the session lock renewal operation cannot be started.
 * @throws IllegalArgumentException if {@code sessionId} is an empty string or {@code maxLockRenewalDuration} is negative.
 */
public Mono<Void> renewSessionLock(Duration maxLockRenewalDuration) {
    return this.renewSessionLock(receiverOptions.getSessionId(), maxLockRenewalDuration);
}

/**
 * Sets the state of the session this receiver works for.
 *
 * @param sessionState State to set on the session.
 *
 * @return A Mono that completes when the session is set
 * @throws IllegalStateException if the receiver is a non-session receiver or receiver is already disposed.
 * @throws ServiceBusException if the session state cannot be set.
*/ public Mono<Void> setSessionState(byte[] sessionState) { return this.setSessionState(receiverOptions.getSessionId(), sessionState); } /** * Starts a new service side transaction. The {@link ServiceBusTransactionContext transaction context} should be * passed to all operations that needs to be in this transaction. * * <p><strong>Creating and using a transaction</strong></p> * <!-- src_embed com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * <pre> * & * & * & * Mono&lt;ServiceBusTransactionContext&gt; transactionContext = receiver.createTransaction& * .cache& * error -&gt; Duration.ZERO, * & * * transactionContext.flatMap& * & * Mono&lt;Void&gt; operations = Mono.when& * receiver.receiveDeferredMessage& * receiver.complete& * receiver.abandon& * * & * return operations.flatMap& * & * </pre> * <!-- end com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * * @return The {@link Mono} that finishes this operation on service bus resource. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if a transaction cannot be created. */ public Mono<ServiceBusTransactionContext> createTransaction() { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction"))); } return tracer.traceMono("ServiceBus.commitTransaction", connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.createTransaction()) .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId()))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Commits the transaction and all the operations associated with it. 
* * <p><strong>Creating and using a transaction</strong></p> * <!-- src_embed com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * <pre> * & * & * & * Mono&lt;ServiceBusTransactionContext&gt; transactionContext = receiver.createTransaction& * .cache& * error -&gt; Duration.ZERO, * & * * transactionContext.flatMap& * & * Mono&lt;Void&gt; operations = Mono.when& * receiver.receiveDeferredMessage& * receiver.complete& * receiver.abandon& * * & * return operations.flatMap& * & * </pre> * <!-- end com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * * @param transactionContext The transaction to be commit. * * @return The {@link Mono} that finishes this operation on service bus resource. * * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the transaction could not be committed. */ public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction"))); } if (Objects.isNull(transactionContext)) { return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return tracer.traceMono("ServiceBus.commitTransaction", connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction( transactionContext.getTransactionId())))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Rollbacks the transaction given and all operations associated with it. 
* * <p><strong>Creating and using a transaction</strong></p> * <!-- src_embed com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * <pre> * & * & * & * Mono&lt;ServiceBusTransactionContext&gt; transactionContext = receiver.createTransaction& * .cache& * error -&gt; Duration.ZERO, * & * * transactionContext.flatMap& * & * Mono&lt;Void&gt; operations = Mono.when& * receiver.receiveDeferredMessage& * receiver.complete& * receiver.abandon& * * & * return operations.flatMap& * & * </pre> * <!-- end com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * * @param transactionContext The transaction to rollback. * * @return The {@link Mono} that finishes this operation on service bus resource. * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the transaction could not be rolled back. */ public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction"))); } if (Objects.isNull(transactionContext)) { return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return tracer.traceMono("ServiceBus.rollbackTransaction", connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction( transactionContext.getTransactionId())))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Disposes of the consumer by closing the underlying links to the service. 
*/ @Override public void close() { if (isDisposed.get()) { return; } try { boolean acquired = completionLock.tryAcquire(5, TimeUnit.SECONDS); if (!acquired) { LOGGER.info("Unable to obtain completion lock."); } } catch (InterruptedException e) { LOGGER.info("Unable to obtain completion lock.", e); } if (isDisposed.getAndSet(true)) { return; } LOGGER.info("Removing receiver links."); final ServiceBusAsyncConsumer disposed = consumer.getAndSet(null); if (disposed != null) { disposed.close(); } if (sessionManager != null) { sessionManager.close(); } managementNodeLocks.close(); renewalContainer.close(); if (trackSettlementSequenceNumber != null) { try { trackSettlementSequenceNumber.close(); } catch (Exception e) { LOGGER.info("Unable to close settlement sequence number subscription.", e); } } onClientClose.run(); } /** * @return receiver options set by user; */ ReceiverOptions getReceiverOptions() { return receiverOptions; } /** * Gets whether or not the management node contains the message lock token and it has not expired. Lock tokens are * held by the management node when they are received from the management node or management operations are * performed using that {@code lockToken}. * * @param lockToken Lock token to check for. * * @return {@code true} if the management node contains the lock token and false otherwise. 
*/ private boolean isManagementToken(String lockToken) { return managementNodeLocks.containsUnexpired(lockToken); } private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus, String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue()))); } else if (Objects.isNull(message)) { return monoError(LOGGER, new NullPointerException("'message' cannot be null.")); } final String lockToken = message.getLockToken(); final String sessionId = message.getSessionId(); if (receiverOptions.getReceiveMode() != ServiceBusReceiveMode.PEEK_LOCK) { return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format( "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus)))); } else if (message.isSettled()) { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("The message has either been deleted or already settled."))); } else if (message.getLockToken() == null) { final String errorMessage = "This operation is not supported for peeked messages. 
" + "Only messages received using receiveMessages() in PEEK_LOCK mode can be settled."; return Mono.error( LOGGER.logExceptionAsError(new UnsupportedOperationException(errorMessage)) ); } final String sessionIdToUse; if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) { sessionIdToUse = receiverOptions.getSessionId(); } else { sessionIdToUse = sessionId; } LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, lockToken) .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(SESSION_ID_KEY, sessionIdToUse) .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus) .log("Update started."); final Mono<Void> performOnManagement = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId), transactionContext)) .then(Mono.fromRunnable(() -> { LOGGER.atInfo() .addKeyValue(LOCK_TOKEN_KEY, lockToken) .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus) .log("Management node Update completed."); message.setIsSettled(); managementNodeLocks.remove(lockToken); renewalContainer.remove(lockToken); })); Mono<Void> updateDispositionOperation; if (sessionManager != null) { updateDispositionOperation = sessionManager.updateDisposition(lockToken, sessionId, dispositionStatus, propertiesToModify, deadLetterReason, deadLetterErrorDescription, transactionContext) .flatMap(isSuccess -> { if (isSuccess) { message.setIsSettled(); renewalContainer.remove(lockToken); return Mono.empty(); } LOGGER.info("Could not perform on session manger. 
Performing on management node."); return performOnManagement; }); } else { final ServiceBusAsyncConsumer existingConsumer = consumer.get(); if (isManagementToken(lockToken) || existingConsumer == null) { updateDispositionOperation = performOnManagement; } else { updateDispositionOperation = existingConsumer.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, transactionContext) .then(Mono.fromRunnable(() -> { LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, lockToken) .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus) .log("Update completed."); message.setIsSettled(); renewalContainer.remove(lockToken); })); } } return instrumentation.instrumentSettlement(updateDispositionOperation, message, message.getContext(), dispositionStatus) .onErrorMap(throwable -> { if (throwable instanceof ServiceBusException) { return throwable; } switch (dispositionStatus) { case COMPLETED: return new ServiceBusException(throwable, ServiceBusErrorSource.COMPLETE); case ABANDONED: return new ServiceBusException(throwable, ServiceBusErrorSource.ABANDON); default: return new ServiceBusException(throwable, ServiceBusErrorSource.UNKNOWN); } }); } /** * If the receiver has not connected via {@link * through the management node. * * @return The name of the receive link, or null of it has not connected via a receive link. */ private String getLinkName(String sessionId) { if (sessionManager != null && !CoreUtils.isNullOrEmpty(sessionId)) { return sessionManager.getLinkName(sessionId); } else if (!CoreUtils.isNullOrEmpty(sessionId) && !receiverOptions.isSessionReceiver()) { return null; } else { final ServiceBusAsyncConsumer existing = consumer.get(); return existing != null ? 
existing.getLinkName() : null; } } Mono<OffsetDateTime> renewSessionLock(String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot renew session lock on a non-session receiver.")); } final String linkName = sessionManager != null ? sessionManager.getLinkName(sessionId) : null; return tracer.traceMono("ServiceBus.renewSessionLock", connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.renewSessionLock(sessionId, linkName))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> renewSessionLock(String sessionId, Duration maxLockRenewalDuration) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException( "Cannot renew session lock on a non-session receiver.")); } else if (maxLockRenewalDuration == null) { return monoError(LOGGER, new NullPointerException("'maxLockRenewalDuration' cannot be null.")); } else if (maxLockRenewalDuration.isNegative()) { return monoError(LOGGER, new IllegalArgumentException( "'maxLockRenewalDuration' cannot be negative.")); } else if (Objects.isNull(sessionId)) { return monoError(LOGGER, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(LOGGER, new IllegalArgumentException("'sessionId' cannot be empty.")); } final LockRenewalOperation operation = new LockRenewalOperation(sessionId, maxLockRenewalDuration, true, this::renewSessionLock); renewalContainer.addOrUpdate(sessionId, OffsetDateTime.now().plus(maxLockRenewalDuration), operation); return 
tracer.traceMono("ServiceBus.renewSessionLock", operation.getCompletionOperation()) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> setSessionState(String sessionId, byte[] sessionState) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot set session state on a non-session receiver.")); } final String linkName = sessionManager != null ? sessionManager.getLinkName(sessionId) : null; return tracer.traceMono("ServiceBus.setSessionState", connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.setSessionState(sessionId, sessionState, linkName))) .onErrorMap((err) -> mapError(err, ServiceBusErrorSource.RECEIVE)); } Mono<byte[]> getSessionState(String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot get session state on a non-session receiver.")); } Mono<byte[]> result; if (sessionManager != null) { result = sessionManager.getSessionState(sessionId); } else { result = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.getSessionState(sessionId, getLinkName(sessionId))); } return tracer.traceMono("ServiceBus.setSessionState", result) .onErrorMap((err) -> mapError(err, ServiceBusErrorSource.RECEIVE)); } ServiceBusReceiverInstrumentation getInstrumentation() { return instrumentation; } /** * Map the error to {@link ServiceBusException} */ private Throwable mapError(Throwable throwable, ServiceBusErrorSource errorSource) { if (!(throwable instanceof 
ServiceBusException)) { return new ServiceBusException(throwable, errorSource); } return throwable; } boolean isConnectionClosed() { return this.connectionProcessor.isChannelClosed(); } boolean isManagementNodeLocksClosed() { return this.managementNodeLocks.isClosed(); } boolean isRenewalContainerClosed() { return this.renewalContainer.isClosed(); } }
class ServiceBusReceiverAsyncClient implements AutoCloseable { private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions(); private static final String TRANSACTION_LINK_NAME = "coordinator"; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusReceiverAsyncClient.class); private final LockContainer<LockRenewalOperation> renewalContainer; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final LockContainer<OffsetDateTime> managementNodeLocks; private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final ServiceBusReceiverInstrumentation instrumentation; private final ServiceBusTracer tracer; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager sessionManager; private final Semaphore completionLock = new Semaphore(1); private final String identifier; private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1); private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>(); private final AutoCloseable trackSettlementSequenceNumber; /** * Creates a receiver that listens to a Service Bus resource. * * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource. * @param entityPath The name of the topic or queue. * @param entityType The type of the Service Bus resource. * @param receiverOptions Options when receiving messages. * @param connectionProcessor The AMQP connection to the Service Bus resource. * @param instrumentation ServiceBus tracing and metrics helper * @param messageSerializer Serializes and deserializes Service Bus messages. * @param onClientClose Operation to run when the client completes. 
     * @param identifier The identifier of this receiver instance.
     */
    ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
        ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
        ServiceBusReceiverInstrumentation instrumentation, MessageSerializer messageSerializer,
        Runnable onClientClose, String identifier) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
        this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
        // NOTE(review): message text has a misplaced quote ("'receiveOptions cannot be null.'") —
        // left as-is here because it is a runtime string.
        this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
        this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
            "'connectionProcessor' cannot be null.");
        // NOTE(review): message says 'tracer' but the argument is 'instrumentation' — confirm intent.
        this.instrumentation = Objects.requireNonNull(instrumentation, "'tracer' cannot be null");
        this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
        this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");

        this.managementNodeLocks = new LockContainer<>(cleanupInterval);
        // Renewal operations older than 2 minutes are force-closed and logged.
        this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> {
            LOGGER.atVerbose()
                .addKeyValue(LOCK_TOKEN_KEY, renewal.getLockToken())
                .addKeyValue("status", renewal.getStatus())
                .log("Closing expired renewal operation.", renewal.getThrowable());
            renewal.close();
        });

        // Non-session receiver: no session manager.
        this.sessionManager = null;
        this.identifier = identifier;
        this.tracer = instrumentation.getTracer();
        this.trackSettlementSequenceNumber = instrumentation.startTrackingSettlementSequenceNumber();
    }

    /**
     * Creates a session-aware receiver. Identical to the other constructor except that a
     * {@link ServiceBusSessionManager} supplies receive links and the client identifier.
     */
    ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
        ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
        ServiceBusReceiverInstrumentation instrumentation, MessageSerializer messageSerializer,
        Runnable onClientClose, ServiceBusSessionManager sessionManager) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
        this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
        this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
        this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
            "'connectionProcessor' cannot be null.");
        this.instrumentation = Objects.requireNonNull(instrumentation, "'tracer' cannot be null");
        this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
        this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
        this.sessionManager = Objects.requireNonNull(sessionManager, "'sessionManager' cannot be null.");

        this.managementNodeLocks = new LockContainer<>(cleanupInterval);
        this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> {
            LOGGER.atInfo()
                .addKeyValue(SESSION_ID_KEY, renewal.getSessionId())
                .addKeyValue("status", renewal.getStatus())
                .log("Closing expired renewal operation.", renewal.getThrowable());
            renewal.close();
        });

        this.identifier = sessionManager.getIdentifier();
        this.tracer = instrumentation.getTracer();
        this.trackSettlementSequenceNumber = instrumentation.startTrackingSettlementSequenceNumber();
    }

    /**
     * Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to
     * {@code {yournamespace}.servicebus.windows.net}.
     *
     * @return The fully qualified Service Bus namespace that the connection is associated with.
     */
    public String getFullyQualifiedNamespace() {
        return fullyQualifiedNamespace;
    }

    /**
     * Gets the Service Bus resource this client interacts with.
     *
     * @return The Service Bus resource this client interacts with.
     */
    public String getEntityPath() {
        return entityPath;
    }

    /**
     * Gets the SessionId of the session if this receiver is a session receiver.
     *
     * @return The SessionId or null if this is not a session receiver.
     */
    public String getSessionId() {
        return receiverOptions.getSessionId();
    }

    /**
     * Gets the identifier of the instance of {@link ServiceBusReceiverAsyncClient}.
     *
     * @return The identifier that can identify the instance of {@link ServiceBusReceiverAsyncClient}.
     */
    public String getIdentifier() {
        return identifier;
    }

    /**
     * Abandons a {@link ServiceBusReceivedMessage message}. This will make the message available again for processing.
     * Abandoning a message will increase the delivery count on the message.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     *
     * @return A {@link Mono} that completes when the Service Bus abandon operation completes.
     *
     * @throws NullPointerException if {@code message} is null.
     * @throws UnsupportedOperationException if the receiver was opened in
     *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode or if the message was received from
     *     {@link ServiceBusReceiverAsyncClient#peekMessage() peekMessage}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be abandoned.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
     */
    public Mono<Void> abandon(ServiceBusReceivedMessage message) {
        return updateDisposition(message, DispositionStatus.ABANDONED, null, null, null, null);
    }

    /**
     * Abandons a {@link ServiceBusReceivedMessage message} and updates the message's properties. This will make the
     * message available again for processing. Abandoning a message will increase the delivery count on the message.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param options The options to set while abandoning the message.
     *
     * @return A {@link Mono} that completes when the Service Bus operation finishes.
     *
     * @throws NullPointerException if {@code message} or {@code options} is null. Also if
     *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
     * @throws UnsupportedOperationException if the receiver was opened in
     *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode or if the message was received from
     *     {@link ServiceBusReceiverAsyncClient#peekMessage() peekMessage}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be abandoned.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
     */
    public Mono<Void> abandon(ServiceBusReceivedMessage message, AbandonOptions options) {
        if (Objects.isNull(options)) {
            // NOTE(review): sibling overloads say "'options' cannot be null." — this one says
            // 'settlementOptions'; runtime string left unchanged, but worth aligning.
            return monoError(LOGGER, new NullPointerException("'settlementOptions' cannot be null."));
        } else if (!Objects.isNull(options.getTransactionContext())
            && Objects.isNull(options.getTransactionContext().getTransactionId())) {
            return monoError(LOGGER, new NullPointerException(
                "'options.transactionContext.transactionId' cannot be null."));
        }

        return updateDisposition(message, DispositionStatus.ABANDONED, null, null,
            options.getPropertiesToModify(), options.getTransactionContext());
    }

    /**
     * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the service.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     *
     * @return A {@link Mono} that finishes when the message is completed on Service Bus.
     *
     * @throws NullPointerException if {@code message} is null.
     * @throws UnsupportedOperationException if the receiver was opened in
     *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode or if the message was received from
     *     {@link ServiceBusReceiverAsyncClient#peekMessage() peekMessage}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be completed.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
     */
    public Mono<Void> complete(ServiceBusReceivedMessage message) {
        return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, null);
    }

    /**
     * Completes a {@link ServiceBusReceivedMessage message} with the given options. This will delete the message from
     * the service.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param options Options used to complete the message.
     *
     * @return A {@link Mono} that finishes when the message is completed on Service Bus.
     *
     * @throws NullPointerException if {@code message} or {@code options} is null. Also if
     *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
     * @throws UnsupportedOperationException if the receiver was opened in
     *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode or if the message was received from
     *     {@link ServiceBusReceiverAsyncClient#peekMessage() peekMessage}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be completed.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
     */
    public Mono<Void> complete(ServiceBusReceivedMessage message, CompleteOptions options) {
        if (Objects.isNull(options)) {
            return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
        } else if (!Objects.isNull(options.getTransactionContext())
            && Objects.isNull(options.getTransactionContext().getTransactionId())) {
            return monoError(LOGGER, new NullPointerException(
                "'options.transactionContext.transactionId' cannot be null."));
        }

        return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null,
            options.getTransactionContext());
    }

    /**
     * Defers a {@link ServiceBusReceivedMessage message}. This will move message into the deferred sub-queue.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     *
     * @return A {@link Mono} that completes when the Service Bus defer operation finishes.
     *
     * @throws NullPointerException if {@code message} is null.
     * @throws UnsupportedOperationException if the receiver was opened in
     *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode or if the message was received from
     *     {@link ServiceBusReceiverAsyncClient#peekMessage() peekMessage}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be deferred.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-deferral">Message deferral</a>
     */
    public Mono<Void> defer(ServiceBusReceivedMessage message) {
        return updateDisposition(message, DispositionStatus.DEFERRED, null, null, null, null);
    }

    /**
     * Defers a {@link ServiceBusReceivedMessage message} with the options set. This will move message into
     * the deferred sub-queue.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param options Options used to defer the message.
     *
     * @return A {@link Mono} that completes when the defer operation finishes.
     *
     * @throws NullPointerException if {@code message} or {@code options} is null. Also if
     *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
     * @throws UnsupportedOperationException if the receiver was opened in
     *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode or if the message was received from
     *     {@link ServiceBusReceiverAsyncClient#peekMessage() peekMessage}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be deferred.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-deferral">Message deferral</a>
     */
    public Mono<Void> defer(ServiceBusReceivedMessage message, DeferOptions options) {
        if (Objects.isNull(options)) {
            return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
        } else if (!Objects.isNull(options.getTransactionContext())
            && Objects.isNull(options.getTransactionContext().getTransactionId())) {
            return monoError(LOGGER, new NullPointerException(
                "'options.transactionContext.transactionId' cannot be null."));
        }

        return updateDisposition(message, DispositionStatus.DEFERRED, null, null,
            options.getPropertiesToModify(), options.getTransactionContext());
    }

    /**
     * Moves a {@link ServiceBusReceivedMessage message} to the dead-letter sub-queue.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     *
     * @return A {@link Mono} that completes when the dead letter operation finishes.
     *
     * @throws NullPointerException if {@code message} is null.
     * @throws UnsupportedOperationException if the receiver was opened in
     *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode or if the message was received from
     *     {@link ServiceBusReceiverAsyncClient#peekMessage() peekMessage}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be dead-lettered.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
     *
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-dead-letter-queues">Dead letter
     *     queues</a>
     */
    public Mono<Void> deadLetter(ServiceBusReceivedMessage message) {
        // Delegates with shared default options (no reason/description/properties).
        return deadLetter(message, DEFAULT_DEAD_LETTER_OPTIONS);
    }

    /**
     * Moves a {@link ServiceBusReceivedMessage message} to the dead-letter sub-queue with the given options.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param options Options used to dead-letter the message.
     *
     * @return A {@link Mono} that completes when the dead letter operation finishes.
     *
     * @throws NullPointerException if {@code message} or {@code options} is null. Also if
     *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
     * @throws UnsupportedOperationException if the receiver was opened in
     *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode or if the message was received from
     *     {@link ServiceBusReceiverAsyncClient#peekMessage() peekMessage}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be dead-lettered.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
     *
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-dead-letter-queues">Dead letter
     *     queues</a>
     */
    public Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions options) {
        if (Objects.isNull(options)) {
            return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
        } else if (!Objects.isNull(options.getTransactionContext())
            && Objects.isNull(options.getTransactionContext().getTransactionId())) {
            return monoError(LOGGER, new NullPointerException(
                "'options.transactionContext.transactionId' cannot be null."));
        }

        // SUSPENDED is the wire-level disposition for dead-lettering.
        return updateDisposition(message, DispositionStatus.SUSPENDED, options.getDeadLetterReason(),
            options.getDeadLetterErrorDescription(), options.getPropertiesToModify(),
            options.getTransactionContext());
    }

    /**
     * Gets the state of the session if this receiver is a session receiver.
     *
     * @return The session state or an empty Mono if there is no state set for the session.
     * @throws IllegalStateException if the receiver is a non-session receiver or receiver is already closed.
     * @throws ServiceBusException if the session state could not be acquired.
     */
    public Mono<byte[]> getSessionState() {
        return getSessionState(receiverOptions.getSessionId());
    }

    /**
     * Reads the next active message without changing the state of the receiver or the message source. The first call to
     * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent
     * message in the entity.
     *
     * @return A peeked {@link ServiceBusReceivedMessage}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at the message.
* @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peekMessage() { return peekMessage(receiverOptions.getSessionId()); } /** * Reads the next active message without changing the state of the receiver or the message source. The first call to * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent * message in the entity. * * @param sessionId Session id of the message to peek from. {@code null} if there is no session. * * @return A peeked {@link ServiceBusReceivedMessage}. * @throws IllegalStateException if the receiver is disposed. * @throws ServiceBusException if an error occurs while peeking at the message. * @see <a href="https: */ Mono<ServiceBusReceivedMessage> peekMessage(String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek"))); } Mono<ServiceBusReceivedMessage> result = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> { final long sequence = lastPeekedSequenceNumber.get() + 1; LOGGER.atVerbose() .addKeyValue(SEQUENCE_NUMBER_KEY, sequence) .log("Peek message."); return channel.peek(sequence, sessionId, getLinkName(sessionId)); }) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)) .handle((message, sink) -> { final long current = lastPeekedSequenceNumber .updateAndGet(value -> Math.max(value, message.getSequenceNumber())); LOGGER.atVerbose() .addKeyValue(SEQUENCE_NUMBER_KEY, current) .log("Updating last peeked sequence number."); sink.next(message); }); return tracer.traceManagementReceive("ServiceBus.peekMessage", result, ServiceBusReceivedMessage::getContext); } /** * Starting from the given sequence number, reads next the active message without changing the state of the receiver * or the message source. * * @param sequenceNumber The sequence number from where to read the message. 
* * @return A peeked {@link ServiceBusReceivedMessage}. * * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if an error occurs while peeking at the message. * @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peekMessage(long sequenceNumber) { return peekMessage(sequenceNumber, receiverOptions.getSessionId()); } /** * Starting from the given sequence number, reads next the active message without changing the state of the receiver * or the message source. * * @param sequenceNumber The sequence number from where to read the message. * @param sessionId Session id of the message to peek from. {@code null} if there is no session. * * @return A peeked {@link ServiceBusReceivedMessage}. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if an error occurs while peeking at the message. * @see <a href="https: */ Mono<ServiceBusReceivedMessage> peekMessage(long sequenceNumber, String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt"))); } return tracer.traceManagementReceive("ServiceBus.peekMessage", connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)), ServiceBusReceivedMessage::getContext); } /** * Reads the next batch of active messages without changing the state of the receiver or the message source. * * @param maxMessages The number of messages. * * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked. * * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if an error occurs while peeking at messages. 
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages) {
        return tracer.traceSyncReceive("ServiceBus.peekMessages",
            peekMessages(maxMessages, receiverOptions.getSessionId()));
    }

    /**
     * Reads the next batch of active messages without changing the state of the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
     *
     * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage messages} that are peeked.
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at messages.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, String sessionId) {
        if (isDisposed.get()) {
            return fluxError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch")));
        }

        if (maxMessages <= 0) {
            return fluxError(LOGGER, new IllegalArgumentException("'maxMessages' is not positive."))
;
        }

        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMapMany(node -> {
                final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1;
                LOGGER.atVerbose().addKeyValue(SEQUENCE_NUMBER_KEY, nextSequenceNumber).log("Peek batch.");

                final Flux<ServiceBusReceivedMessage> messages =
                    node.peek(nextSequenceNumber, sessionId, getLinkName(sessionId), maxMessages);

                // Side channel that observes the last emitted message and bumps the peek watermark.
                // An empty batch is replaced by a placeholder carrying the current watermark so
                // `last()` never errors on an empty source.
                // NOTE(review): `messages` appears in both `handle` (derived) and `Flux.merge` —
                // presumably it is a replayable/cached source; confirm it is not subscribed twice
                // against the service.
                final Mono<ServiceBusReceivedMessage> handle = messages
                    .switchIfEmpty(Mono.fromCallable(() -> {
                        ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(BinaryData
                            .fromBytes(new byte[0]));
                        emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get());
                        return emptyMessage;
                    }))
                    .last()
                    .handle((last, sink) -> {
                        final long current = lastPeekedSequenceNumber
                            .updateAndGet(value -> Math.max(value, last.getSequenceNumber()));

                        LOGGER.atVerbose().addKeyValue(SEQUENCE_NUMBER_KEY, current).log("Last peeked sequence number in batch.");

                        // Completes without emitting: the placeholder/last message is bookkeeping only.
                        sink.complete();
                    });

                return Flux.merge(messages, handle);
            })
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
    }

    /**
     * Starting from the given sequence number, reads the next batch of active messages without changing the state of
     * the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     * @param sequenceNumber The sequence number from where to start reading messages.
     *
     * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} peeked.
     *
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at messages.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, long sequenceNumber) {
        return peekMessages(maxMessages, sequenceNumber, receiverOptions.getSessionId());
    }

    /**
     * Starting from the given sequence number, reads the next batch of active messages without changing the state of
     * the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     * @param sequenceNumber The sequence number from where to start reading messages.
     * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
     *
     * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage} peeked.
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at messages.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, long sequenceNumber, String sessionId) {
        if (isDisposed.get()) {
            return fluxError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt")));
        }
        if (maxMessages <= 0) {
            return fluxError(LOGGER, new IllegalArgumentException("'maxMessages' is not positive."));
        }

        // Explicit-sequence batch peek does not update lastPeekedSequenceNumber.
        return tracer.traceSyncReceive("ServiceBus.peekMessages",
            connectionProcessor
                .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
                .flatMapMany(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages))
                .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)));
    }

    /**
     * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity.
     * This Flux continuously receives messages from a Service Bus entity until either:
     *
     * <ul>
     * <li>The receiver is closed.</li>
     * <li>The subscription to the Flux is disposed.</li>
     * <li>A terminal signal from a downstream subscriber is propagated upstream (ie. {@link Flux#take(long)} or
     * {@link Flux#take(Duration)}).</li>
     * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li>
     * </ul>
     *
     * @return An <b>infinite</b> stream of messages from the Service Bus entity.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while receiving messages.
     */
    public Flux<ServiceBusReceivedMessage> receiveMessages() {
        if (isDisposed.get()) {
            return fluxError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveMessages")));
        }
        // limitRate(1, 0): request messages one at a time from upstream; 0 disables the
        // replenishing-threshold optimization.
        return receiveMessagesNoBackPressure().limitRate(1, 0);
    }

    // "try" warning suppressed: the try-with-resources resource (tracing scope) is intentionally
    // unused inside the block.
    @SuppressWarnings("try")
    Flux<ServiceBusReceivedMessage> receiveMessagesNoBackPressure() {
        return receiveMessagesWithContext(0)
            .handle((serviceBusMessageContext, sink) -> {
                // Make the message's span current while signaling downstream, then close the scope.
                try (AutoCloseable scope = tracer.makeSpanCurrent(serviceBusMessageContext.getMessage().getContext())) {
                    if (serviceBusMessageContext.hasError()) {
                        sink.error(serviceBusMessageContext.getThrowable());
                        return;
                    }
                    sink.next(serviceBusMessageContext.getMessage());
                } catch (Exception ex) {
                    // Scope disposal failure must not break the message stream.
                    LOGGER.verbose("Error disposing scope", ex);
                }
            });
    }

    /**
     * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity.
     * This Flux continuously receives messages from a Service Bus entity until either:
     *
     * <ul>
     * <li>The receiver is closed.</li>
     * <li>The subscription to the Flux is disposed.</li>
     * <li>A terminal signal from a downstream subscriber is propagated upstream (ie. {@link Flux#take(long)} or
     * {@link Flux#take(Duration)}).</li>
     * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li>
     * </ul>
     *
     * @return An <b>infinite</b> stream of messages from the Service Bus entity.
     */
    Flux<ServiceBusMessageContext> receiveMessagesWithContext() {
        return receiveMessagesWithContext(1);
    }

    // Assembles the receive pipeline. Layer order matters:
    // raw receive -> tracing -> auto lock renewal -> auto complete -> rate limit -> error mapping.
    // highTide <= 0 disables the limitRate stage.
    Flux<ServiceBusMessageContext> receiveMessagesWithContext(int highTide) {
        final Flux<ServiceBusMessageContext> messageFlux = sessionManager != null
            ? sessionManager.receive()
            : getOrCreateConsumer().receive().map(ServiceBusMessageContext::new);

        final Flux<ServiceBusMessageContext> messageFluxWithTracing = new FluxTrace(messageFlux, instrumentation);

        final Flux<ServiceBusMessageContext> withAutoLockRenewal;
        // Lock renewal applies only to non-session receivers here; session renewal is handled
        // by the session manager.
        if (!receiverOptions.isSessionReceiver() && receiverOptions.isAutoLockRenewEnabled()) {
            withAutoLockRenewal = new FluxAutoLockRenew(messageFluxWithTracing, receiverOptions,
                renewalContainer, this::renewMessageLock);
        } else {
            withAutoLockRenewal = messageFluxWithTracing;
        }

        Flux<ServiceBusMessageContext> result;
        if (receiverOptions.isEnableAutoComplete()) {
            // completionLock serializes complete/abandon so settlements do not interleave.
            result = new FluxAutoComplete(withAutoLockRenewal, completionLock,
                context -> context.getMessage() != null ? complete(context.getMessage()) : Mono.empty(),
                context -> context.getMessage() != null ? abandon(context.getMessage()) : Mono.empty());
        } else {
            result = withAutoLockRenewal;
        }

        if (highTide > 0) {
            result = result.limitRate(highTide, 0);
        }

        return result
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
    }

    /**
     * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
     * sequence number.
     *
     * @param sequenceNumber The {@link ServiceBusReceivedMessage#getSequenceNumber() sequence number} of the
     *     message.
     *
     * @return A deferred message with the matching {@code sequenceNumber}.
     *
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if deferred message cannot be received.
     */
    public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) {
        return receiveDeferredMessage(sequenceNumber, receiverOptions.getSessionId());
    }

    /**
     * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
     * sequence number.
     *
     * @param sequenceNumber The {@link ServiceBusReceivedMessage#getSequenceNumber() sequence number} of the
     *     message.
     * @param sessionId Session id of the deferred message. {@code null} if there is no session.
     *
     * @return A deferred message with the matching {@code sequenceNumber}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if deferred message cannot be received.
     */
    Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessage")));
        }

        return tracer.traceManagementReceive("ServiceBus.receiveDeferredMessage",
            connectionProcessor
                .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
                .flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(),
                    sessionId, getLinkName(sessionId), Collections.singleton(sequenceNumber)).last())
                .map(receivedMessage -> {
                    // Messages without a lock token (e.g. RECEIVE_AND_DELETE) need no lock tracking.
                    if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                        return receivedMessage;
                    }
                    if (receiverOptions.getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) {
                        // Management-node receives are settled via the management node, so track
                        // the lock token locally for later settlement/renewal.
                        receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                            receivedMessage.getLockedUntil(),
                            receivedMessage.getLockedUntil()));
                    }

                    return receivedMessage;
                })
                .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)),
            ServiceBusReceivedMessage::getContext);
    }

    /**
     * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
     * by using sequence number.
     *
     * @param sequenceNumbers The sequence numbers of the deferred messages.
     *
     * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}.
     *
     * @throws NullPointerException if {@code sequenceNumbers} is null.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if deferred messages cannot be received.
     */
    public Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers) {
        return tracer.traceSyncReceive("ServiceBus.receiveDeferredMessages",
            receiveDeferredMessages(sequenceNumbers, receiverOptions.getSessionId()));
    }

    /**
     * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
     * by using sequence number.
     *
     * @param sequenceNumbers The sequence numbers of the deferred messages.
     * @param sessionId Session id of the deferred messages. {@code null} if there is no session.
     *
     * @return An {@link IterableStream} of deferred {@link ServiceBusReceivedMessage messages}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws NullPointerException if {@code sequenceNumbers} is null.
     * @throws ServiceBusException if deferred message cannot be received.
     */
    Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers, String sessionId) {
        if (isDisposed.get()) {
            return fluxError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch")));
        }
        if (sequenceNumbers == null) {
            return fluxError(LOGGER, new NullPointerException("'sequenceNumbers' cannot be null"));
        }
        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(),
                sessionId, getLinkName(sessionId), sequenceNumbers))
            .map(receivedMessage -> {
                if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                    return receivedMessage;
                }
                if (receiverOptions.getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) {
                    // Same lock-tracking rationale as the single-message overload above.
                    receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                        receivedMessage.getLockedUntil(),
                        receivedMessage.getLockedUntil()));
                }

                return receivedMessage;
            })
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
    }

    /**
     * Package-private method that releases a
message. * * @param message Message to release. * @return Mono that completes when message is successfully released. */ Mono<Void> release(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.RELEASED, null, null, null, null); } /** * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the * entity. When a message is received in {@link ServiceBusReceiveMode * server for this receiver instance for a duration as specified during the entity creation (LockDuration). If * processing of the message requires longer than this duration, the lock needs to be renewed. For each renewal, the * lock is reset to the entity's LockDuration value. * * @param message The {@link ServiceBusReceivedMessage} to perform auto-lock renewal. * * @return The new expiration time for the message. * * @throws NullPointerException if {@code message} or {@code message.getLockToken()} is null. * @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * @throws IllegalStateException if the receiver is a session receiver or receiver is already disposed. * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value. 
*/ public Mono<OffsetDateTime> renewMessageLock(ServiceBusReceivedMessage message) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock"))); } else if (Objects.isNull(message)) { return monoError(LOGGER, new NullPointerException("'message' cannot be null.")); } else if (Objects.isNull(message.getLockToken())) { return monoError(LOGGER, new NullPointerException("'message.getLockToken()' cannot be null.")); } else if (message.getLockToken().isEmpty()) { return monoError(LOGGER, new IllegalArgumentException("'message.getLockToken()' cannot be empty.")); } else if (receiverOptions.isSessionReceiver()) { final String errorMessage = "Renewing message lock is an invalid operation when working with sessions."; return monoError(LOGGER, new IllegalStateException(errorMessage)); } return tracer.traceMonoWithLink("ServiceBus.renewMessageLock", renewMessageLock(message.getLockToken()), message, message.getContext()) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } /** * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the * entity. * * @param lockToken to be renewed. * * @return The new expiration time for the message. * @throws IllegalStateException if receiver is already disposed. */ Mono<OffsetDateTime> renewMessageLock(String lockToken) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(serviceBusManagementNode -> serviceBusManagementNode.renewMessageLock(lockToken, getLinkName(null))) .map(offsetDateTime -> managementNodeLocks.addOrUpdate(lockToken, offsetDateTime, offsetDateTime)); } /** * Starts the auto lock renewal for a {@link ServiceBusReceivedMessage message}. 
* * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param maxLockRenewalDuration Maximum duration to keep renewing the lock token. * * @return A Mono that completes when the message renewal operation has completed up until * {@code maxLockRenewalDuration}. * * @throws NullPointerException if {@code message}, {@code message.getLockToken()}, or * {@code maxLockRenewalDuration} is null. * @throws IllegalStateException if the receiver is a session receiver or the receiver is disposed. * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value. * @throws ServiceBusException If the message lock cannot be renewed. */ public Mono<Void> renewMessageLock(ServiceBusReceivedMessage message, Duration maxLockRenewalDuration) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewMessageLock"))); } else if (Objects.isNull(message)) { return monoError(LOGGER, new NullPointerException("'message' cannot be null.")); } else if (Objects.isNull(message.getLockToken())) { return monoError(LOGGER, new NullPointerException("'message.getLockToken()' cannot be null.")); } else if (message.getLockToken().isEmpty()) { return monoError(LOGGER, new IllegalArgumentException("'message.getLockToken()' cannot be empty.")); } else if (receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException( String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken()))); } else if (maxLockRenewalDuration == null) { return monoError(LOGGER, new NullPointerException("'maxLockRenewalDuration' cannot be null.")); } else if (maxLockRenewalDuration.isNegative()) { return monoError(LOGGER, new IllegalArgumentException("'maxLockRenewalDuration' cannot be negative.")); } final LockRenewalOperation operation = new LockRenewalOperation(message.getLockToken(), maxLockRenewalDuration, false, ignored -> 
renewMessageLock(message)); renewalContainer.addOrUpdate(message.getLockToken(), OffsetDateTime.now().plus(maxLockRenewalDuration), operation); return tracer.traceMonoWithLink("ServiceBus.renewMessageLock", operation.getCompletionOperation(), message, message.getContext()) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } /** * Renews the session lock if this receiver is a session receiver. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver or if receiver is already disposed. * @throws ServiceBusException if the session lock cannot be renewed. */ public Mono<OffsetDateTime> renewSessionLock() { return renewSessionLock(receiverOptions.getSessionId()); } /** * Starts the auto lock renewal for the session this receiver works for. * * @param maxLockRenewalDuration Maximum duration to keep renewing the session lock. * * @return A lock renewal operation for the message. * * @throws NullPointerException if {@code sessionId} or {@code maxLockRenewalDuration} is null. * @throws IllegalStateException if the receiver is a non-session receiver or the receiver is disposed. * @throws ServiceBusException if the session lock renewal operation cannot be started. * @throws IllegalArgumentException if {@code sessionId} is an empty string or {@code maxLockRenewalDuration} is negative. */ public Mono<Void> renewSessionLock(Duration maxLockRenewalDuration) { return this.renewSessionLock(receiverOptions.getSessionId(), maxLockRenewalDuration); } /** * Sets the state of the session this receiver works for. * * @param sessionState State to set on the session. * * @return A Mono that completes when the session is set * @throws IllegalStateException if the receiver is a non-session receiver or receiver is already disposed. * @throws ServiceBusException if the session state cannot be set. 
*/ public Mono<Void> setSessionState(byte[] sessionState) { return this.setSessionState(receiverOptions.getSessionId(), sessionState); } /** * Starts a new service side transaction. The {@link ServiceBusTransactionContext transaction context} should be * passed to all operations that needs to be in this transaction. * * <p><strong>Creating and using a transaction</strong></p> * <!-- src_embed com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * <pre> * & * & * & * Mono&lt;ServiceBusTransactionContext&gt; transactionContext = receiver.createTransaction& * .cache& * error -&gt; Duration.ZERO, * & * * transactionContext.flatMap& * & * Mono&lt;Void&gt; operations = Mono.when& * receiver.receiveDeferredMessage& * receiver.complete& * receiver.abandon& * * & * return operations.flatMap& * & * </pre> * <!-- end com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * * @return The {@link Mono} that finishes this operation on service bus resource. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if a transaction cannot be created. */ public Mono<ServiceBusTransactionContext> createTransaction() { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction"))); } return tracer.traceMono("ServiceBus.commitTransaction", connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.createTransaction()) .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId()))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Commits the transaction and all the operations associated with it. 
* * <p><strong>Creating and using a transaction</strong></p> * <!-- src_embed com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * <pre> * & * & * & * Mono&lt;ServiceBusTransactionContext&gt; transactionContext = receiver.createTransaction& * .cache& * error -&gt; Duration.ZERO, * & * * transactionContext.flatMap& * & * Mono&lt;Void&gt; operations = Mono.when& * receiver.receiveDeferredMessage& * receiver.complete& * receiver.abandon& * * & * return operations.flatMap& * & * </pre> * <!-- end com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * * @param transactionContext The transaction to be commit. * * @return The {@link Mono} that finishes this operation on service bus resource. * * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the transaction could not be committed. */ public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction"))); } if (Objects.isNull(transactionContext)) { return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return tracer.traceMono("ServiceBus.commitTransaction", connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction( transactionContext.getTransactionId())))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Rollbacks the transaction given and all operations associated with it. 
* * <p><strong>Creating and using a transaction</strong></p> * <!-- src_embed com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * <pre> * & * & * & * Mono&lt;ServiceBusTransactionContext&gt; transactionContext = receiver.createTransaction& * .cache& * error -&gt; Duration.ZERO, * & * * transactionContext.flatMap& * & * Mono&lt;Void&gt; operations = Mono.when& * receiver.receiveDeferredMessage& * receiver.complete& * receiver.abandon& * * & * return operations.flatMap& * & * </pre> * <!-- end com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * * @param transactionContext The transaction to rollback. * * @return The {@link Mono} that finishes this operation on service bus resource. * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the transaction could not be rolled back. */ public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction"))); } if (Objects.isNull(transactionContext)) { return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return tracer.traceMono("ServiceBus.rollbackTransaction", connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction( transactionContext.getTransactionId())))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Disposes of the consumer by closing the underlying links to the service. 
*/ @Override public void close() { if (isDisposed.get()) { return; } try { boolean acquired = completionLock.tryAcquire(5, TimeUnit.SECONDS); if (!acquired) { LOGGER.info("Unable to obtain completion lock."); } } catch (InterruptedException e) { LOGGER.info("Unable to obtain completion lock.", e); } if (isDisposed.getAndSet(true)) { return; } LOGGER.info("Removing receiver links."); final ServiceBusAsyncConsumer disposed = consumer.getAndSet(null); if (disposed != null) { disposed.close(); } if (sessionManager != null) { sessionManager.close(); } managementNodeLocks.close(); renewalContainer.close(); if (trackSettlementSequenceNumber != null) { try { trackSettlementSequenceNumber.close(); } catch (Exception e) { LOGGER.info("Unable to close settlement sequence number subscription.", e); } } onClientClose.run(); } /** * @return receiver options set by user; */ ReceiverOptions getReceiverOptions() { return receiverOptions; } /** * Gets whether or not the management node contains the message lock token and it has not expired. Lock tokens are * held by the management node when they are received from the management node or management operations are * performed using that {@code lockToken}. * * @param lockToken Lock token to check for. * * @return {@code true} if the management node contains the lock token and false otherwise. 
*/ private boolean isManagementToken(String lockToken) { return managementNodeLocks.containsUnexpired(lockToken); } private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus, String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue()))); } else if (Objects.isNull(message)) { return monoError(LOGGER, new NullPointerException("'message' cannot be null.")); } final String lockToken = message.getLockToken(); final String sessionId = message.getSessionId(); if (receiverOptions.getReceiveMode() != ServiceBusReceiveMode.PEEK_LOCK) { return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format( "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus)))); } else if (message.isSettled()) { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("The message has either been deleted or already settled."))); } else if (message.getLockToken() == null) { final String errorMessage = "This operation is not supported for peeked messages. 
" + "Only messages received using receiveMessages() in PEEK_LOCK mode can be settled."; return Mono.error( LOGGER.logExceptionAsError(new UnsupportedOperationException(errorMessage)) ); } final String sessionIdToUse; if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) { sessionIdToUse = receiverOptions.getSessionId(); } else { sessionIdToUse = sessionId; } LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, lockToken) .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(SESSION_ID_KEY, sessionIdToUse) .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus) .log("Update started."); final Mono<Void> performOnManagement = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId), transactionContext)) .then(Mono.fromRunnable(() -> { LOGGER.atInfo() .addKeyValue(LOCK_TOKEN_KEY, lockToken) .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus) .log("Management node Update completed."); message.setIsSettled(); managementNodeLocks.remove(lockToken); renewalContainer.remove(lockToken); })); Mono<Void> updateDispositionOperation; if (sessionManager != null) { updateDispositionOperation = sessionManager.updateDisposition(lockToken, sessionId, dispositionStatus, propertiesToModify, deadLetterReason, deadLetterErrorDescription, transactionContext) .flatMap(isSuccess -> { if (isSuccess) { message.setIsSettled(); renewalContainer.remove(lockToken); return Mono.empty(); } LOGGER.info("Could not perform on session manger. 
Performing on management node."); return performOnManagement; }); } else { final ServiceBusAsyncConsumer existingConsumer = consumer.get(); if (isManagementToken(lockToken) || existingConsumer == null) { updateDispositionOperation = performOnManagement; } else { updateDispositionOperation = existingConsumer.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, transactionContext) .then(Mono.fromRunnable(() -> { LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, lockToken) .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus) .log("Update completed."); message.setIsSettled(); renewalContainer.remove(lockToken); })); } } return instrumentation.instrumentSettlement(updateDispositionOperation, message, message.getContext(), dispositionStatus) .onErrorMap(throwable -> { if (throwable instanceof ServiceBusException) { return throwable; } switch (dispositionStatus) { case COMPLETED: return new ServiceBusException(throwable, ServiceBusErrorSource.COMPLETE); case ABANDONED: return new ServiceBusException(throwable, ServiceBusErrorSource.ABANDON); default: return new ServiceBusException(throwable, ServiceBusErrorSource.UNKNOWN); } }); } /** * If the receiver has not connected via {@link * through the management node. * * @return The name of the receive link, or null of it has not connected via a receive link. */ private String getLinkName(String sessionId) { if (sessionManager != null && !CoreUtils.isNullOrEmpty(sessionId)) { return sessionManager.getLinkName(sessionId); } else if (!CoreUtils.isNullOrEmpty(sessionId) && !receiverOptions.isSessionReceiver()) { return null; } else { final ServiceBusAsyncConsumer existing = consumer.get(); return existing != null ? 
existing.getLinkName() : null; } } Mono<OffsetDateTime> renewSessionLock(String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot renew session lock on a non-session receiver.")); } final String linkName = sessionManager != null ? sessionManager.getLinkName(sessionId) : null; return tracer.traceMono("ServiceBus.renewSessionLock", connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.renewSessionLock(sessionId, linkName))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> renewSessionLock(String sessionId, Duration maxLockRenewalDuration) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException( "Cannot renew session lock on a non-session receiver.")); } else if (maxLockRenewalDuration == null) { return monoError(LOGGER, new NullPointerException("'maxLockRenewalDuration' cannot be null.")); } else if (maxLockRenewalDuration.isNegative()) { return monoError(LOGGER, new IllegalArgumentException( "'maxLockRenewalDuration' cannot be negative.")); } else if (Objects.isNull(sessionId)) { return monoError(LOGGER, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(LOGGER, new IllegalArgumentException("'sessionId' cannot be empty.")); } final LockRenewalOperation operation = new LockRenewalOperation(sessionId, maxLockRenewalDuration, true, this::renewSessionLock); renewalContainer.addOrUpdate(sessionId, OffsetDateTime.now().plus(maxLockRenewalDuration), operation); return 
tracer.traceMono("ServiceBus.renewSessionLock", operation.getCompletionOperation()) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> setSessionState(String sessionId, byte[] sessionState) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot set session state on a non-session receiver.")); } final String linkName = sessionManager != null ? sessionManager.getLinkName(sessionId) : null; return tracer.traceMono("ServiceBus.setSessionState", connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.setSessionState(sessionId, sessionState, linkName))) .onErrorMap((err) -> mapError(err, ServiceBusErrorSource.RECEIVE)); } Mono<byte[]> getSessionState(String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot get session state on a non-session receiver.")); } Mono<byte[]> result; if (sessionManager != null) { result = sessionManager.getSessionState(sessionId); } else { result = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.getSessionState(sessionId, getLinkName(sessionId))); } return tracer.traceMono("ServiceBus.setSessionState", result) .onErrorMap((err) -> mapError(err, ServiceBusErrorSource.RECEIVE)); } ServiceBusReceiverInstrumentation getInstrumentation() { return instrumentation; } /** * Map the error to {@link ServiceBusException} */ private Throwable mapError(Throwable throwable, ServiceBusErrorSource errorSource) { if (!(throwable instanceof 
ServiceBusException)) { return new ServiceBusException(throwable, errorSource); } return throwable; } boolean isConnectionClosed() { return this.connectionProcessor.isChannelClosed(); } boolean isManagementNodeLocksClosed() { return this.managementNodeLocks.isClosed(); } boolean isRenewalContainerClosed() { return this.renewalContainer.isClosed(); } }
Add trace-level log without request diagnostics.
protected void log(CosmosDiagnosticsContext ctx) { if (this.shouldLogDueToStatusCode(ctx.getStatusCode(), ctx.getSubStatusCode())) { logger.warn( "Account: {} -> DB: {}, Col:{}, StatusCode: {}:{} Diagnostics: {}", ctx.getAccountName(), ctx.getDatabaseName(), ctx.getCollectionName(), ctx.getStatusCode(), ctx.getSubStatusCode(), ctx.toString()); } else { logger.info( "Account: {} -> DB: {}, Col:{}, StatusCode: {}:{} Diagnostics: {}", ctx.getAccountName(), ctx.getDatabaseName(), ctx.getCollectionName(), ctx.getStatusCode(), ctx.getSubStatusCode(), ctx.toString()); } }
if (this.shouldLogDueToStatusCode(ctx.getStatusCode(), ctx.getSubStatusCode())) {
protected void log(CosmosDiagnosticsContext ctx) { if (ctx.isFailure()) { if (logger.isErrorEnabled()) { logger.error( "Account: {} -> DB: {}, Col:{}, StatusCode: {}:{} Diagnostics: {}", ctx.getAccountName(), ctx.getDatabaseName(), ctx.getContainerName(), ctx.getStatusCode(), ctx.getSubStatusCode(), ctx); } } else if (ctx.isThresholdViolated()) { if (logger.isInfoEnabled()) { logger.info( "Account: {} -> DB: {}, Col:{}, StatusCode: {}:{} Diagnostics: {}", ctx.getAccountName(), ctx.getDatabaseName(), ctx.getContainerName(), ctx.getStatusCode(), ctx.getSubStatusCode(), ctx); } } else if (logger.isTraceEnabled()) { logger.trace( "Account: {} -> DB: {}, Col:{}, StatusCode: {}:{} Diagnostics: {}", ctx.getAccountName(), ctx.getDatabaseName(), ctx.getContainerName(), ctx.getStatusCode(), ctx.getSubStatusCode(), ctx); } else if (logger.isDebugEnabled()) { logger.debug( "Account: {} -> DB: {}, Col:{}, StatusCode: {}:{}, Latency: {}, Request charge: {}", ctx.getAccountName(), ctx.getDatabaseName(), ctx.getContainerName(), ctx.getStatusCode(), ctx.getSubStatusCode(), ctx.getDuration(), ctx.getTotalRequestCharge()); } }
class CosmosDiagnosticsLogger implements CosmosDiagnosticsHandler { private final static Logger logger = LoggerFactory.getLogger(CosmosDiagnosticsLogger.class); private final static ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor(); private final CosmosDiagnosticsLoggerConfig config; private final Set<OperationType> pointOperationTypes = new HashSet<OperationType>() {{ add(OperationType.Create); add(OperationType.Delete); add(OperationType.Patch); add(OperationType.Read); add(OperationType.Replace); add(OperationType.Upsert); }}; /** * Creates an instance of the CosmosDiagnosticLogger class * @param config the configuration determining the conditions when to log an operation */ public CosmosDiagnosticsLogger(CosmosDiagnosticsLoggerConfig config) { checkNotNull(config, "Argument 'config' must not be null."); this.config = config; } /** * Decides whether to log diagnostics for an operation and emits the logs when needed * @param traceContext the Azure trace context * @param diagnosticsContext the Cosmos DB diagnostic context with metadata for the operation */ @Override public final void handleDiagnostics(Context traceContext, CosmosDiagnosticsContext diagnosticsContext) { checkNotNull(diagnosticsContext, "Argument 'diagnosticsContext' must not be null."); if (shouldLog(diagnosticsContext)) { this.log(diagnosticsContext); } } /** * Decides whether to log diagnostics for an operation * @param diagnosticsContext * @return a flag inidcating whether to log the operation or not */ protected boolean shouldLog(CosmosDiagnosticsContext diagnosticsContext) { if (!diagnosticsContext.hasCompleted()) { return false; } if (shouldLogDueToStatusCode(diagnosticsContext.getStatusCode(), diagnosticsContext.getSubStatusCode())) { return true; } ResourceType resourceType = ctxAccessor.getResourceType(diagnosticsContext); OperationType 
operationType = ctxAccessor.getOperationType(diagnosticsContext); if (resourceType == ResourceType.Document) { if (pointOperationTypes.contains(operationType)) { if (diagnosticsContext.getDuration().compareTo(this.config.getPointOperationLatencyThreshold()) >= 1) { return true; } } else { if (diagnosticsContext.getDuration().compareTo(this.config.getFeedOperationLatencyThreshold()) >= 1) { return true; } } } if (diagnosticsContext.getTotalRequestCharge() > this.config.getRequestChargeThreshold()) { return true; } return false; } private boolean shouldLogDueToStatusCode(int statusCode, int subStatusCode) { return statusCode >= 500 || statusCode == 408 || statusCode == 410; } /** * Logs the operation. This method can be overridden for example to emit logs to a different target than log4j * @param ctx */ }
class CosmosDiagnosticsLogger implements CosmosDiagnosticsHandler { private final static Logger logger = LoggerFactory.getLogger(CosmosDiagnosticsLogger.class); /** * Creates an instance of the CosmosDiagnosticLogger class */ public CosmosDiagnosticsLogger() { } /** * Decides whether to log diagnostics for an operation and emits the logs when needed * * @param diagnosticsContext the Cosmos DB diagnostic context with metadata for the operation * @param traceContext the Azure trace context */ @Override public final void handleDiagnostics(CosmosDiagnosticsContext diagnosticsContext, Context traceContext) { checkNotNull(diagnosticsContext, "Argument 'diagnosticsContext' must not be null."); if (shouldLog(diagnosticsContext)) { this.log(diagnosticsContext); } } /** * Decides whether to log diagnostics for an operation * @param diagnosticsContext the diagnostics context * @return a flag indicating whether to log the operation or not */ protected boolean shouldLog(CosmosDiagnosticsContext diagnosticsContext) { if (!diagnosticsContext.isCompleted()) { return false; } return diagnosticsContext.isFailure() || diagnosticsContext.isThresholdViolated() || logger.isDebugEnabled(); } /** * Logs the operation. This method can be overridden for example to emit logs to a different target than log4j * @param ctx the diagnostics context */ }
Actually probably devug without diagnostics and trace with diagnostics
protected void log(CosmosDiagnosticsContext ctx) { if (this.shouldLogDueToStatusCode(ctx.getStatusCode(), ctx.getSubStatusCode())) { logger.warn( "Account: {} -> DB: {}, Col:{}, StatusCode: {}:{} Diagnostics: {}", ctx.getAccountName(), ctx.getDatabaseName(), ctx.getCollectionName(), ctx.getStatusCode(), ctx.getSubStatusCode(), ctx.toString()); } else { logger.info( "Account: {} -> DB: {}, Col:{}, StatusCode: {}:{} Diagnostics: {}", ctx.getAccountName(), ctx.getDatabaseName(), ctx.getCollectionName(), ctx.getStatusCode(), ctx.getSubStatusCode(), ctx.toString()); } }
if (this.shouldLogDueToStatusCode(ctx.getStatusCode(), ctx.getSubStatusCode())) {
protected void log(CosmosDiagnosticsContext ctx) { if (ctx.isFailure()) { if (logger.isErrorEnabled()) { logger.error( "Account: {} -> DB: {}, Col:{}, StatusCode: {}:{} Diagnostics: {}", ctx.getAccountName(), ctx.getDatabaseName(), ctx.getContainerName(), ctx.getStatusCode(), ctx.getSubStatusCode(), ctx); } } else if (ctx.isThresholdViolated()) { if (logger.isInfoEnabled()) { logger.info( "Account: {} -> DB: {}, Col:{}, StatusCode: {}:{} Diagnostics: {}", ctx.getAccountName(), ctx.getDatabaseName(), ctx.getContainerName(), ctx.getStatusCode(), ctx.getSubStatusCode(), ctx); } } else if (logger.isTraceEnabled()) { logger.trace( "Account: {} -> DB: {}, Col:{}, StatusCode: {}:{} Diagnostics: {}", ctx.getAccountName(), ctx.getDatabaseName(), ctx.getContainerName(), ctx.getStatusCode(), ctx.getSubStatusCode(), ctx); } else if (logger.isDebugEnabled()) { logger.debug( "Account: {} -> DB: {}, Col:{}, StatusCode: {}:{}, Latency: {}, Request charge: {}", ctx.getAccountName(), ctx.getDatabaseName(), ctx.getContainerName(), ctx.getStatusCode(), ctx.getSubStatusCode(), ctx.getDuration(), ctx.getTotalRequestCharge()); } }
class CosmosDiagnosticsLogger implements CosmosDiagnosticsHandler { private final static Logger logger = LoggerFactory.getLogger(CosmosDiagnosticsLogger.class); private final static ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor(); private final CosmosDiagnosticsLoggerConfig config; private final Set<OperationType> pointOperationTypes = new HashSet<OperationType>() {{ add(OperationType.Create); add(OperationType.Delete); add(OperationType.Patch); add(OperationType.Read); add(OperationType.Replace); add(OperationType.Upsert); }}; /** * Creates an instance of the CosmosDiagnosticLogger class * @param config the configuration determining the conditions when to log an operation */ public CosmosDiagnosticsLogger(CosmosDiagnosticsLoggerConfig config) { checkNotNull(config, "Argument 'config' must not be null."); this.config = config; } /** * Decides whether to log diagnostics for an operation and emits the logs when needed * @param traceContext the Azure trace context * @param diagnosticsContext the Cosmos DB diagnostic context with metadata for the operation */ @Override public final void handleDiagnostics(Context traceContext, CosmosDiagnosticsContext diagnosticsContext) { checkNotNull(diagnosticsContext, "Argument 'diagnosticsContext' must not be null."); if (shouldLog(diagnosticsContext)) { this.log(diagnosticsContext); } } /** * Decides whether to log diagnostics for an operation * @param diagnosticsContext * @return a flag inidcating whether to log the operation or not */ protected boolean shouldLog(CosmosDiagnosticsContext diagnosticsContext) { if (!diagnosticsContext.hasCompleted()) { return false; } if (shouldLogDueToStatusCode(diagnosticsContext.getStatusCode(), diagnosticsContext.getSubStatusCode())) { return true; } ResourceType resourceType = ctxAccessor.getResourceType(diagnosticsContext); OperationType 
operationType = ctxAccessor.getOperationType(diagnosticsContext); if (resourceType == ResourceType.Document) { if (pointOperationTypes.contains(operationType)) { if (diagnosticsContext.getDuration().compareTo(this.config.getPointOperationLatencyThreshold()) >= 1) { return true; } } else { if (diagnosticsContext.getDuration().compareTo(this.config.getFeedOperationLatencyThreshold()) >= 1) { return true; } } } if (diagnosticsContext.getTotalRequestCharge() > this.config.getRequestChargeThreshold()) { return true; } return false; } private boolean shouldLogDueToStatusCode(int statusCode, int subStatusCode) { return statusCode >= 500 || statusCode == 408 || statusCode == 410; } /** * Logs the operation. This method can be overridden for example to emit logs to a different target than log4j * @param ctx */ }
class CosmosDiagnosticsLogger implements CosmosDiagnosticsHandler { private final static Logger logger = LoggerFactory.getLogger(CosmosDiagnosticsLogger.class); /** * Creates an instance of the CosmosDiagnosticLogger class */ public CosmosDiagnosticsLogger() { } /** * Decides whether to log diagnostics for an operation and emits the logs when needed * * @param diagnosticsContext the Cosmos DB diagnostic context with metadata for the operation * @param traceContext the Azure trace context */ @Override public final void handleDiagnostics(CosmosDiagnosticsContext diagnosticsContext, Context traceContext) { checkNotNull(diagnosticsContext, "Argument 'diagnosticsContext' must not be null."); if (shouldLog(diagnosticsContext)) { this.log(diagnosticsContext); } } /** * Decides whether to log diagnostics for an operation * @param diagnosticsContext the diagnostics context * @return a flag indicating whether to log the operation or not */ protected boolean shouldLog(CosmosDiagnosticsContext diagnosticsContext) { if (!diagnosticsContext.isCompleted()) { return false; } return diagnosticsContext.isFailure() || diagnosticsContext.isThresholdViolated() || logger.isDebugEnabled(); } /** * Logs the operation. This method can be overridden for example to emit logs to a different target than log4j * @param ctx the diagnostics context */ }
Should the emitted log level be configurable? I.e. to log it at debug, info or warning level? Also, since toString() on the context could become CPU intensive (if in future it uses JSON etc) one can check the log level enabled for the logger and if not enabled for info or warning then do not call toString or put the rest of the info on the stack since it is not going to be logged anyway.
protected void log(CosmosDiagnosticsContext ctx) { if (this.shouldLogDueToStatusCode(ctx.getStatusCode(), ctx.getSubStatusCode())) { logger.warn( "Account: {} -> DB: {}, Col:{}, StatusCode: {}:{} Diagnostics: {}", ctx.getAccountName(), ctx.getDatabaseName(), ctx.getCollectionName(), ctx.getStatusCode(), ctx.getSubStatusCode(), ctx.toString()); } else { logger.info( "Account: {} -> DB: {}, Col:{}, StatusCode: {}:{} Diagnostics: {}", ctx.getAccountName(), ctx.getDatabaseName(), ctx.getCollectionName(), ctx.getStatusCode(), ctx.getSubStatusCode(), ctx.toString()); } }
logger.info(
protected void log(CosmosDiagnosticsContext ctx) { if (ctx.isFailure()) { if (logger.isErrorEnabled()) { logger.error( "Account: {} -> DB: {}, Col:{}, StatusCode: {}:{} Diagnostics: {}", ctx.getAccountName(), ctx.getDatabaseName(), ctx.getContainerName(), ctx.getStatusCode(), ctx.getSubStatusCode(), ctx); } } else if (ctx.isThresholdViolated()) { if (logger.isInfoEnabled()) { logger.info( "Account: {} -> DB: {}, Col:{}, StatusCode: {}:{} Diagnostics: {}", ctx.getAccountName(), ctx.getDatabaseName(), ctx.getContainerName(), ctx.getStatusCode(), ctx.getSubStatusCode(), ctx); } } else if (logger.isTraceEnabled()) { logger.trace( "Account: {} -> DB: {}, Col:{}, StatusCode: {}:{} Diagnostics: {}", ctx.getAccountName(), ctx.getDatabaseName(), ctx.getContainerName(), ctx.getStatusCode(), ctx.getSubStatusCode(), ctx); } else if (logger.isDebugEnabled()) { logger.debug( "Account: {} -> DB: {}, Col:{}, StatusCode: {}:{}, Latency: {}, Request charge: {}", ctx.getAccountName(), ctx.getDatabaseName(), ctx.getContainerName(), ctx.getStatusCode(), ctx.getSubStatusCode(), ctx.getDuration(), ctx.getTotalRequestCharge()); } }
class CosmosDiagnosticsLogger implements CosmosDiagnosticsHandler { private final static Logger logger = LoggerFactory.getLogger(CosmosDiagnosticsLogger.class); private final static ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor(); private final CosmosDiagnosticsLoggerConfig config; private final Set<OperationType> pointOperationTypes = new HashSet<OperationType>() {{ add(OperationType.Create); add(OperationType.Delete); add(OperationType.Patch); add(OperationType.Read); add(OperationType.Replace); add(OperationType.Upsert); }}; /** * Creates an instance of the CosmosDiagnosticLogger class * @param config the configuration determining the conditions when to log an operation */ public CosmosDiagnosticsLogger(CosmosDiagnosticsLoggerConfig config) { checkNotNull(config, "Argument 'config' must not be null."); this.config = config; } /** * Decides whether to log diagnostics for an operation and emits the logs when needed * @param traceContext the Azure trace context * @param diagnosticsContext the Cosmos DB diagnostic context with metadata for the operation */ @Override public final void handleDiagnostics(Context traceContext, CosmosDiagnosticsContext diagnosticsContext) { checkNotNull(diagnosticsContext, "Argument 'diagnosticsContext' must not be null."); if (shouldLog(diagnosticsContext)) { this.log(diagnosticsContext); } } /** * Decides whether to log diagnostics for an operation * @param diagnosticsContext * @return a flag inidcating whether to log the operation or not */ protected boolean shouldLog(CosmosDiagnosticsContext diagnosticsContext) { if (!diagnosticsContext.hasCompleted()) { return false; } if (shouldLogDueToStatusCode(diagnosticsContext.getStatusCode(), diagnosticsContext.getSubStatusCode())) { return true; } ResourceType resourceType = ctxAccessor.getResourceType(diagnosticsContext); OperationType 
operationType = ctxAccessor.getOperationType(diagnosticsContext); if (resourceType == ResourceType.Document) { if (pointOperationTypes.contains(operationType)) { if (diagnosticsContext.getDuration().compareTo(this.config.getPointOperationLatencyThreshold()) >= 1) { return true; } } else { if (diagnosticsContext.getDuration().compareTo(this.config.getFeedOperationLatencyThreshold()) >= 1) { return true; } } } if (diagnosticsContext.getTotalRequestCharge() > this.config.getRequestChargeThreshold()) { return true; } return false; } private boolean shouldLogDueToStatusCode(int statusCode, int subStatusCode) { return statusCode >= 500 || statusCode == 408 || statusCode == 410; } /** * Logs the operation. This method can be overridden for example to emit logs to a different target than log4j * @param ctx */ }
class CosmosDiagnosticsLogger implements CosmosDiagnosticsHandler { private final static Logger logger = LoggerFactory.getLogger(CosmosDiagnosticsLogger.class); /** * Creates an instance of the CosmosDiagnosticLogger class */ public CosmosDiagnosticsLogger() { } /** * Decides whether to log diagnostics for an operation and emits the logs when needed * * @param diagnosticsContext the Cosmos DB diagnostic context with metadata for the operation * @param traceContext the Azure trace context */ @Override public final void handleDiagnostics(CosmosDiagnosticsContext diagnosticsContext, Context traceContext) { checkNotNull(diagnosticsContext, "Argument 'diagnosticsContext' must not be null."); if (shouldLog(diagnosticsContext)) { this.log(diagnosticsContext); } } /** * Decides whether to log diagnostics for an operation * @param diagnosticsContext the diagnostics context * @return a flag indicating whether to log the operation or not */ protected boolean shouldLog(CosmosDiagnosticsContext diagnosticsContext) { if (!diagnosticsContext.isCompleted()) { return false; } return diagnosticsContext.isFailure() || diagnosticsContext.isThresholdViolated() || logger.isDebugEnabled(); } /** * Logs the operation. This method can be overridden for example to emit logs to a different target than log4j * @param ctx the diagnostics context */ }
Made the log level configurable.
protected void log(CosmosDiagnosticsContext ctx) { if (this.shouldLogDueToStatusCode(ctx.getStatusCode(), ctx.getSubStatusCode())) { logger.warn( "Account: {} -> DB: {}, Col:{}, StatusCode: {}:{} Diagnostics: {}", ctx.getAccountName(), ctx.getDatabaseName(), ctx.getCollectionName(), ctx.getStatusCode(), ctx.getSubStatusCode(), ctx.toString()); } else { logger.info( "Account: {} -> DB: {}, Col:{}, StatusCode: {}:{} Diagnostics: {}", ctx.getAccountName(), ctx.getDatabaseName(), ctx.getCollectionName(), ctx.getStatusCode(), ctx.getSubStatusCode(), ctx.toString()); } }
logger.info(
protected void log(CosmosDiagnosticsContext ctx) { if (ctx.isFailure()) { if (logger.isErrorEnabled()) { logger.error( "Account: {} -> DB: {}, Col:{}, StatusCode: {}:{} Diagnostics: {}", ctx.getAccountName(), ctx.getDatabaseName(), ctx.getContainerName(), ctx.getStatusCode(), ctx.getSubStatusCode(), ctx); } } else if (ctx.isThresholdViolated()) { if (logger.isInfoEnabled()) { logger.info( "Account: {} -> DB: {}, Col:{}, StatusCode: {}:{} Diagnostics: {}", ctx.getAccountName(), ctx.getDatabaseName(), ctx.getContainerName(), ctx.getStatusCode(), ctx.getSubStatusCode(), ctx); } } else if (logger.isTraceEnabled()) { logger.trace( "Account: {} -> DB: {}, Col:{}, StatusCode: {}:{} Diagnostics: {}", ctx.getAccountName(), ctx.getDatabaseName(), ctx.getContainerName(), ctx.getStatusCode(), ctx.getSubStatusCode(), ctx); } else if (logger.isDebugEnabled()) { logger.debug( "Account: {} -> DB: {}, Col:{}, StatusCode: {}:{}, Latency: {}, Request charge: {}", ctx.getAccountName(), ctx.getDatabaseName(), ctx.getContainerName(), ctx.getStatusCode(), ctx.getSubStatusCode(), ctx.getDuration(), ctx.getTotalRequestCharge()); } }
class CosmosDiagnosticsLogger implements CosmosDiagnosticsHandler { private final static Logger logger = LoggerFactory.getLogger(CosmosDiagnosticsLogger.class); private final static ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor(); private final CosmosDiagnosticsLoggerConfig config; private final Set<OperationType> pointOperationTypes = new HashSet<OperationType>() {{ add(OperationType.Create); add(OperationType.Delete); add(OperationType.Patch); add(OperationType.Read); add(OperationType.Replace); add(OperationType.Upsert); }}; /** * Creates an instance of the CosmosDiagnosticLogger class * @param config the configuration determining the conditions when to log an operation */ public CosmosDiagnosticsLogger(CosmosDiagnosticsLoggerConfig config) { checkNotNull(config, "Argument 'config' must not be null."); this.config = config; } /** * Decides whether to log diagnostics for an operation and emits the logs when needed * @param traceContext the Azure trace context * @param diagnosticsContext the Cosmos DB diagnostic context with metadata for the operation */ @Override public final void handleDiagnostics(Context traceContext, CosmosDiagnosticsContext diagnosticsContext) { checkNotNull(diagnosticsContext, "Argument 'diagnosticsContext' must not be null."); if (shouldLog(diagnosticsContext)) { this.log(diagnosticsContext); } } /** * Decides whether to log diagnostics for an operation * @param diagnosticsContext * @return a flag inidcating whether to log the operation or not */ protected boolean shouldLog(CosmosDiagnosticsContext diagnosticsContext) { if (!diagnosticsContext.hasCompleted()) { return false; } if (shouldLogDueToStatusCode(diagnosticsContext.getStatusCode(), diagnosticsContext.getSubStatusCode())) { return true; } ResourceType resourceType = ctxAccessor.getResourceType(diagnosticsContext); OperationType 
operationType = ctxAccessor.getOperationType(diagnosticsContext); if (resourceType == ResourceType.Document) { if (pointOperationTypes.contains(operationType)) { if (diagnosticsContext.getDuration().compareTo(this.config.getPointOperationLatencyThreshold()) >= 1) { return true; } } else { if (diagnosticsContext.getDuration().compareTo(this.config.getFeedOperationLatencyThreshold()) >= 1) { return true; } } } if (diagnosticsContext.getTotalRequestCharge() > this.config.getRequestChargeThreshold()) { return true; } return false; } private boolean shouldLogDueToStatusCode(int statusCode, int subStatusCode) { return statusCode >= 500 || statusCode == 408 || statusCode == 410; } /** * Logs the operation. This method can be overridden for example to emit logs to a different target than log4j * @param ctx */ }
class CosmosDiagnosticsLogger implements CosmosDiagnosticsHandler { private final static Logger logger = LoggerFactory.getLogger(CosmosDiagnosticsLogger.class); /** * Creates an instance of the CosmosDiagnosticLogger class */ public CosmosDiagnosticsLogger() { } /** * Decides whether to log diagnostics for an operation and emits the logs when needed * * @param diagnosticsContext the Cosmos DB diagnostic context with metadata for the operation * @param traceContext the Azure trace context */ @Override public final void handleDiagnostics(CosmosDiagnosticsContext diagnosticsContext, Context traceContext) { checkNotNull(diagnosticsContext, "Argument 'diagnosticsContext' must not be null."); if (shouldLog(diagnosticsContext)) { this.log(diagnosticsContext); } } /** * Decides whether to log diagnostics for an operation * @param diagnosticsContext the diagnostics context * @return a flag indicating whether to log the operation or not */ protected boolean shouldLog(CosmosDiagnosticsContext diagnosticsContext) { if (!diagnosticsContext.isCompleted()) { return false; } return diagnosticsContext.isFailure() || diagnosticsContext.isThresholdViolated() || logger.isDebugEnabled(); } /** * Logs the operation. This method can be overridden for example to emit logs to a different target than log4j * @param ctx the diagnostics context */ }
Based on https://opentelemetry.io/docs/reference/specification/trace/semantic_conventions/database/ net.peer.name should be Name of the database host (For direct mode spans, it should be Service Endpoint host name) Also, net.peer.port could be used to capture Service Endpoint port number
private StartSpanOptions startSpanOptions(String spanName, CosmosDiagnosticsContext cosmosCtx) { StartSpanOptions spanOptions = new StartSpanOptions(SpanKind.CLIENT) .setAttribute(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME) .setAttribute("db.system","cosmosdb") .setAttribute("db.operation",spanName) .setAttribute("net.peer.name",cosmosCtx.getAccountName()); String databaseId = cosmosCtx.getDatabaseName(); if (databaseId != null) { spanOptions.setAttribute("db.name", databaseId); } return spanOptions; }
.setAttribute("net.peer.name",cosmosCtx.getAccountName());
private StartSpanOptions startSpanOptions(String spanName, CosmosDiagnosticsContext cosmosCtx) { StartSpanOptions spanOptions; if (tracer instanceof EnabledNoOpTracer) { spanOptions = new StartSpanOptions(SpanKind.INTERNAL); } else { spanOptions = new StartSpanOptions(SpanKind.INTERNAL) .setAttribute("db.system", "cosmosdb") .setAttribute("db.operation", spanName) .setAttribute("net.peer.name", cosmosCtx.getAccountName()) .setAttribute("db.cosmosdb.operation_type",cosmosCtx.getOperationType()) .setAttribute("db.cosmosdb.resource_type",cosmosCtx.getResourceType()) .setAttribute("db.name", cosmosCtx.getDatabaseName()) .setAttribute("db.cosmosdb.client_id", this.clientId) .setAttribute("user_agent.original", this.userAgent) .setAttribute("db.cosmosdb.connection_mode", this.connectionMode); if (!cosmosCtx.getOperationId().isEmpty() && !cosmosCtx.getOperationId().equals(ctxAccessor.getSpanName(cosmosCtx))) { spanOptions.setAttribute("db.cosmosdb.operation_id", cosmosCtx.getOperationId()); } String containerName = cosmosCtx.getContainerName(); if (containerName != null) { spanOptions.setAttribute("db.cosmosdb.container", containerName); } } return spanOptions; }
class OpenTelemetryCosmosTracer implements CosmosTracer { private final Tracer tracer; public OpenTelemetryCosmosTracer(Tracer tracer) { checkNotNull(tracer, "Argument 'tracer' must not be null."); this.tracer = tracer; } @Override public Context startSpan(String spanName, CosmosDiagnosticsContext cosmosCtx, Context context) { checkNotNull(spanName, "Argument 'spanName' must not be null."); checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null."); Context local = Objects .requireNonNull(context, "'context' cannot be null.") .addData(COSMOS_DIAGNOSTICS_CONTEXT_KEY, cosmosCtx); StartSpanOptions spanOptions = this.startSpanOptions( spanName, cosmosCtx); return tracer.start(spanName, spanOptions, local); } @Override public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context) { String errorMessage = null; if (cosmosCtx == null) { return; } if (!cosmosCtx.hasCompleted()) { tracer.end("CosmosCtx not completed yet.", null, context); } if (cosmosCtx.isFailure() || cosmosCtx.isThresholdViolated()) { tracer.end(errorMessage, finalError, context); Map<String, Object> attributes = new HashMap<>(); } Throwable finalError = cosmosCtx.getFinalError(); if (finalError != null && cosmosCtx.isFailure()) { if (finalError instanceof CosmosException) { CosmosException cosmosException = (CosmosException)finalError; errorMessage = cosmosException.getMessageWithoutDiagnostics(); } else { errorMessage = finalError.getMessage(); } tracer.setAttribute("exception.type", finalError.getClass().getCanonicalName(), context); tracer.setAttribute("exception.message", errorMessage, context); StringWriter stackWriter = new StringWriter(); PrintWriter printWriter = new PrintWriter(stackWriter); finalError.printStackTrace(printWriter); printWriter.flush(); stackWriter.flush(); tracer.setAttribute("exception.stacktrace", stackWriter.toString(), context); printWriter.close(); try { stackWriter.close(); } catch (IOException e) { LOGGER.warn("Error trying to close StringWriter.", e); } 
} tracer.setAttribute("db.cosmosdb.operation_type",cosmosCtx.getOperationType(), context); tracer.setAttribute("db.cosmosdb.container",cosmosCtx.getContainerName(), context); tracer.setAttribute( "db.cosmosdb.status_code", Integer.toString(cosmosCtx.getStatusCode()), context); tracer.setAttribute( "db.cosmosdb.sub_status_code", Integer.toString(cosmosCtx.getSubStatusCode()), context); tracer.setAttribute( "db.cosmosdb.request_charge", Float.toString(cosmosCtx.getTotalRequestCharge()), context); tracer.setAttribute("db.cosmosdb.max_request_content_length",cosmosCtx.getMaxRequestPayloadSizeInBytes(), context); tracer.setAttribute("db.cosmosdb.max_response_content_length_bytes",cosmosCtx.getMaxResponsePayloadSizeInBytes(), context); tracer.setAttribute("db.cosmosdb.retry_count",cosmosCtx.getRetryCount() , context); Set<String> regionsContacted = cosmosCtx.getContactedRegionNames(); if (regionsContacted != null && !regionsContacted.isEmpty()) { tracer.setAttribute( "db.cosmosdb.regions_contacted", String.join(", ", regionsContacted), context); } tracer.end(errorMessage, finalError, context); } }
class OpenTelemetryCosmosTracer implements CosmosTracer { private final Tracer tracer; private final CosmosClientTelemetryConfig config; private final String clientId; private final String connectionMode; private final String userAgent; public OpenTelemetryCosmosTracer( Tracer tracer, CosmosClientTelemetryConfig config, String clientId, String userAgent, String connectionMode) { checkNotNull(tracer, "Argument 'tracer' must not be null."); checkNotNull(config, "Argument 'config' must not be null."); checkNotNull(clientId, "Argument 'clientId' must not be null."); checkNotNull(userAgent, "Argument 'userAgent' must not be null."); checkNotNull(connectionMode, "Argument 'connectionMode' must not be null."); this.tracer = tracer; this.config = config; this.clientId = clientId; this.userAgent = userAgent; this.connectionMode = connectionMode; } private boolean isTransportLevelTracingEnabled() { return clientTelemetryConfigAccessor.isTransportLevelTracingEnabled(this.config); } @Override public Context startSpan(String spanName, CosmosDiagnosticsContext cosmosCtx, Context context) { checkNotNull(spanName, "Argument 'spanName' must not be null."); checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null."); Context local = Objects .requireNonNull(context, "'context' cannot be null.") .addData(COSMOS_DIAGNOSTICS_CONTEXT_KEY, cosmosCtx); StartSpanOptions spanOptions = this.startSpanOptions( spanName, cosmosCtx); return tracer.start(spanName, spanOptions, local); } @Override public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context) { if (cosmosCtx == null) { return; } if (!cosmosCtx.isCompleted()) { tracer.end("CosmosCtx not completed yet.", null, context); return; } String errorMessage = null; Throwable finalError = cosmosCtx.getFinalError(); if (finalError != null && cosmosCtx.isFailure()) { if (finalError instanceof CosmosException) { CosmosException cosmosException = (CosmosException) finalError; errorMessage = cosmosException.getShortMessage(); } 
else { errorMessage = finalError.getMessage(); } } if (tracer instanceof EnabledNoOpTracer) { tracer.end(errorMessage, finalError, context); return; } if (cosmosCtx.isFailure() || cosmosCtx.isThresholdViolated()) { Map<String, Object> attributes = new HashMap<>(); attributes.put("Diagnostics", cosmosCtx.toJson()); if (cosmosCtx.isFailure()) { tracer.addEvent("failure", attributes, OffsetDateTime.now(), context); } else { tracer.addEvent("threshold_violation", attributes, OffsetDateTime.now(), context); } } if (finalError != null) { String exceptionType; if (finalError instanceof CosmosException) { exceptionType = CosmosException.class.getCanonicalName(); } else { exceptionType = finalError.getClass().getCanonicalName(); } tracer.setAttribute("exception.escaped", Boolean.toString(cosmosCtx.isFailure()), context); tracer.setAttribute("exception.type", exceptionType, context); tracer.setAttribute("exception.message", errorMessage, context); tracer.setAttribute("exception.stacktrace", prettifyCallstack(finalError), context); } if (this.isTransportLevelTracingEnabled()) { traceTransportLevel(cosmosCtx, context); } tracer.setAttribute( "db.cosmosdb.status_code", Integer.toString(cosmosCtx.getStatusCode()), context); tracer.setAttribute( "db.cosmosdb.sub_status_code", Integer.toString(cosmosCtx.getSubStatusCode()), context); tracer.setAttribute( "db.cosmosdb.request_charge", Float.toString(cosmosCtx.getTotalRequestCharge()), context); tracer.setAttribute("db.cosmosdb.request_content_length",cosmosCtx.getMaxRequestPayloadSizeInBytes(), context); tracer.setAttribute("db.cosmosdb.max_response_content_length_bytes",cosmosCtx.getMaxResponsePayloadSizeInBytes(), context); tracer.setAttribute("db.cosmosdb.retry_count",cosmosCtx.getRetryCount() , context); Set<String> regionsContacted = cosmosCtx.getContactedRegionNames(); if (!regionsContacted.isEmpty()) { tracer.setAttribute( "db.cosmosdb.regions_contacted", String.join(", ", regionsContacted), context); } 
tracer.end(errorMessage, finalError, context); } private void recordStoreResponseStatistics( List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics, Context context) { for (ClientSideRequestStatistics.StoreResponseStatistics responseStatistics: storeResponseStatistics) { StoreResultDiagnostics storeResultDiagnostics = responseStatistics.getStoreResult(); StoreResponseDiagnostics storeResponseDiagnostics = storeResultDiagnostics.getStoreResponseDiagnostics(); Map<String, Object> attributes = new HashMap<>(); attributes.put("rntbd.url", storeResultDiagnostics.getStorePhysicalAddressAsString()); attributes.put("rntbd.resource_type", responseStatistics.getRequestResourceType().toString()); attributes.put("rntbd.operation_type", responseStatistics.getRequestOperationType().toString()); attributes.put("rntbd.region", responseStatistics.getRegionName()); if (storeResultDiagnostics.getLsn() > 0) { attributes.put("rntbd.lsn", Long.toString(storeResultDiagnostics.getLsn())); } if (storeResultDiagnostics.getGlobalCommittedLSN() > 0) { attributes.put("rntbd.gclsn", Long.toString(storeResultDiagnostics.getGlobalCommittedLSN())); } String responseSessionToken = responseStatistics.getRequestSessionToken(); if (responseSessionToken != null && !responseSessionToken.isEmpty()) { attributes.put("rntbd.session_token", responseSessionToken); } String requestSessionToken = responseStatistics.getRequestSessionToken(); if (requestSessionToken != null && !requestSessionToken.isEmpty()) { attributes.put("rntbd.request_session_token", requestSessionToken); } String activityId = storeResponseDiagnostics.getActivityId(); if (requestSessionToken != null && !requestSessionToken.isEmpty()) { attributes.put("rntbd.activity_id", activityId); } String pkRangeId = storeResponseDiagnostics.getPartitionKeyRangeId(); if (pkRangeId != null && !pkRangeId.isEmpty()) { attributes.put("rntbd.partition_key_range_id", pkRangeId); } attributes.put("rntbd.status_code", 
Integer.toString(storeResponseDiagnostics.getStatusCode())); if (storeResponseDiagnostics.getSubStatusCode() != 0) { attributes.put("rntbd.sub_status_code", Integer.toString(storeResponseDiagnostics.getSubStatusCode())); } if (storeResponseDiagnostics.getFaultInjectionRuleId() != null) { attributes.put("rntbd.fault_injection_rule_id", storeResponseDiagnostics.getFaultInjectionRuleId()); } Double backendLatency = storeResultDiagnostics.getBackendLatencyInMs(); if (backendLatency != null) { attributes.put("rntbd.backend_latency", Double.toString(backendLatency)); } double requestCharge = storeResponseDiagnostics.getRequestCharge(); attributes.put("rntbd.request_charge", Double.toString(requestCharge)); Duration latency = responseStatistics.getDuration(); if (latency != null) { attributes.put("rntbd.latency", latency.toString()); } if (storeResponseDiagnostics.getRntbdChannelStatistics() != null) { attributes.put( "rntbd.is_new_channel", storeResponseDiagnostics.getRntbdChannelStatistics().isWaitForConnectionInit()); } OffsetDateTime startTime = null; for (RequestTimeline.Event event : storeResponseDiagnostics.getRequestTimeline()) { OffsetDateTime eventTime = event.getStartTime() != null ? event.getStartTime().atOffset(ZoneOffset.UTC) : null; if (eventTime != null && (startTime == null || startTime.isBefore(eventTime))) { startTime = eventTime; } Duration duration = event.getDuration(); if (duration == null || duration == Duration.ZERO) { continue; } attributes.put("rntbd.latency_" + event.getName().toLowerCase(Locale.ROOT), duration.toString()); } attributes.put("rntbd.request_size_bytes",storeResponseDiagnostics.getRequestPayloadLength()); attributes.put("rntbd.response_size_bytes",storeResponseDiagnostics.getResponsePayloadLength()); this.tracer.addEvent( "rntbd.request", attributes, startTime != null ? 
startTime : OffsetDateTime.now(), context); } } private void traceTransportLevelRequests( Collection<ClientSideRequestStatistics> clientSideRequestStatistics, Context context) { if (clientSideRequestStatistics != null) { for (ClientSideRequestStatistics requestStatistics : clientSideRequestStatistics) { recordStoreResponseStatistics( requestStatistics.getResponseStatisticsList(), context); recordStoreResponseStatistics( requestStatistics.getSupplementalResponseStatisticsList(), context); } } } private void traceTransportLevel(CosmosDiagnosticsContext diagnosticsContext, Context context) { Collection<ClientSideRequestStatistics> combinedClientSideRequestStatistics = ctxAccessor.getDistinctCombinedClientSideRequestStatistics(diagnosticsContext); traceTransportLevelRequests( combinedClientSideRequestStatistics, context); } }
This trace is for SDK operation - not physical network request - so physical address doesn't work. We had that discussion with OTel folks on the .Net side - whether to use a different attribute name or use this as account. Feedback was to use the account - because when looking at the big picture - app calling downstream service which might each use Cosmos or other database account level is the right abstraction. There will be option to also add request-level tracing - that will have physical address of replica for direct mode.
private StartSpanOptions startSpanOptions(String spanName, CosmosDiagnosticsContext cosmosCtx) { StartSpanOptions spanOptions = new StartSpanOptions(SpanKind.CLIENT) .setAttribute(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME) .setAttribute("db.system","cosmosdb") .setAttribute("db.operation",spanName) .setAttribute("net.peer.name",cosmosCtx.getAccountName()); String databaseId = cosmosCtx.getDatabaseName(); if (databaseId != null) { spanOptions.setAttribute("db.name", databaseId); } return spanOptions; }
.setAttribute("net.peer.name",cosmosCtx.getAccountName());
private StartSpanOptions startSpanOptions(String spanName, CosmosDiagnosticsContext cosmosCtx) { StartSpanOptions spanOptions; if (tracer instanceof EnabledNoOpTracer) { spanOptions = new StartSpanOptions(SpanKind.INTERNAL); } else { spanOptions = new StartSpanOptions(SpanKind.INTERNAL) .setAttribute("db.system", "cosmosdb") .setAttribute("db.operation", spanName) .setAttribute("net.peer.name", cosmosCtx.getAccountName()) .setAttribute("db.cosmosdb.operation_type",cosmosCtx.getOperationType()) .setAttribute("db.cosmosdb.resource_type",cosmosCtx.getResourceType()) .setAttribute("db.name", cosmosCtx.getDatabaseName()) .setAttribute("db.cosmosdb.client_id", this.clientId) .setAttribute("user_agent.original", this.userAgent) .setAttribute("db.cosmosdb.connection_mode", this.connectionMode); if (!cosmosCtx.getOperationId().isEmpty() && !cosmosCtx.getOperationId().equals(ctxAccessor.getSpanName(cosmosCtx))) { spanOptions.setAttribute("db.cosmosdb.operation_id", cosmosCtx.getOperationId()); } String containerName = cosmosCtx.getContainerName(); if (containerName != null) { spanOptions.setAttribute("db.cosmosdb.container", containerName); } } return spanOptions; }
class OpenTelemetryCosmosTracer implements CosmosTracer { private final Tracer tracer; public OpenTelemetryCosmosTracer(Tracer tracer) { checkNotNull(tracer, "Argument 'tracer' must not be null."); this.tracer = tracer; } @Override public Context startSpan(String spanName, CosmosDiagnosticsContext cosmosCtx, Context context) { checkNotNull(spanName, "Argument 'spanName' must not be null."); checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null."); Context local = Objects .requireNonNull(context, "'context' cannot be null.") .addData(COSMOS_DIAGNOSTICS_CONTEXT_KEY, cosmosCtx); StartSpanOptions spanOptions = this.startSpanOptions( spanName, cosmosCtx); return tracer.start(spanName, spanOptions, local); } @Override public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context) { String errorMessage = null; if (cosmosCtx == null) { return; } if (!cosmosCtx.hasCompleted()) { tracer.end("CosmosCtx not completed yet.", null, context); } if (cosmosCtx.isFailure() || cosmosCtx.isThresholdViolated()) { tracer.end(errorMessage, finalError, context); Map<String, Object> attributes = new HashMap<>(); } Throwable finalError = cosmosCtx.getFinalError(); if (finalError != null && cosmosCtx.isFailure()) { if (finalError instanceof CosmosException) { CosmosException cosmosException = (CosmosException)finalError; errorMessage = cosmosException.getMessageWithoutDiagnostics(); } else { errorMessage = finalError.getMessage(); } tracer.setAttribute("exception.type", finalError.getClass().getCanonicalName(), context); tracer.setAttribute("exception.message", errorMessage, context); StringWriter stackWriter = new StringWriter(); PrintWriter printWriter = new PrintWriter(stackWriter); finalError.printStackTrace(printWriter); printWriter.flush(); stackWriter.flush(); tracer.setAttribute("exception.stacktrace", stackWriter.toString(), context); printWriter.close(); try { stackWriter.close(); } catch (IOException e) { LOGGER.warn("Error trying to close StringWriter.", e); } 
} tracer.setAttribute("db.cosmosdb.operation_type",cosmosCtx.getOperationType(), context); tracer.setAttribute("db.cosmosdb.container",cosmosCtx.getContainerName(), context); tracer.setAttribute( "db.cosmosdb.status_code", Integer.toString(cosmosCtx.getStatusCode()), context); tracer.setAttribute( "db.cosmosdb.sub_status_code", Integer.toString(cosmosCtx.getSubStatusCode()), context); tracer.setAttribute( "db.cosmosdb.request_charge", Float.toString(cosmosCtx.getTotalRequestCharge()), context); tracer.setAttribute("db.cosmosdb.max_request_content_length",cosmosCtx.getMaxRequestPayloadSizeInBytes(), context); tracer.setAttribute("db.cosmosdb.max_response_content_length_bytes",cosmosCtx.getMaxResponsePayloadSizeInBytes(), context); tracer.setAttribute("db.cosmosdb.retry_count",cosmosCtx.getRetryCount() , context); Set<String> regionsContacted = cosmosCtx.getContactedRegionNames(); if (regionsContacted != null && !regionsContacted.isEmpty()) { tracer.setAttribute( "db.cosmosdb.regions_contacted", String.join(", ", regionsContacted), context); } tracer.end(errorMessage, finalError, context); } }
class OpenTelemetryCosmosTracer implements CosmosTracer { private final Tracer tracer; private final CosmosClientTelemetryConfig config; private final String clientId; private final String connectionMode; private final String userAgent; public OpenTelemetryCosmosTracer( Tracer tracer, CosmosClientTelemetryConfig config, String clientId, String userAgent, String connectionMode) { checkNotNull(tracer, "Argument 'tracer' must not be null."); checkNotNull(config, "Argument 'config' must not be null."); checkNotNull(clientId, "Argument 'clientId' must not be null."); checkNotNull(userAgent, "Argument 'userAgent' must not be null."); checkNotNull(connectionMode, "Argument 'connectionMode' must not be null."); this.tracer = tracer; this.config = config; this.clientId = clientId; this.userAgent = userAgent; this.connectionMode = connectionMode; } private boolean isTransportLevelTracingEnabled() { return clientTelemetryConfigAccessor.isTransportLevelTracingEnabled(this.config); } @Override public Context startSpan(String spanName, CosmosDiagnosticsContext cosmosCtx, Context context) { checkNotNull(spanName, "Argument 'spanName' must not be null."); checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null."); Context local = Objects .requireNonNull(context, "'context' cannot be null.") .addData(COSMOS_DIAGNOSTICS_CONTEXT_KEY, cosmosCtx); StartSpanOptions spanOptions = this.startSpanOptions( spanName, cosmosCtx); return tracer.start(spanName, spanOptions, local); } @Override public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context) { if (cosmosCtx == null) { return; } if (!cosmosCtx.isCompleted()) { tracer.end("CosmosCtx not completed yet.", null, context); return; } String errorMessage = null; Throwable finalError = cosmosCtx.getFinalError(); if (finalError != null && cosmosCtx.isFailure()) { if (finalError instanceof CosmosException) { CosmosException cosmosException = (CosmosException) finalError; errorMessage = cosmosException.getShortMessage(); } 
else { errorMessage = finalError.getMessage(); } } if (tracer instanceof EnabledNoOpTracer) { tracer.end(errorMessage, finalError, context); return; } if (cosmosCtx.isFailure() || cosmosCtx.isThresholdViolated()) { Map<String, Object> attributes = new HashMap<>(); attributes.put("Diagnostics", cosmosCtx.toJson()); if (cosmosCtx.isFailure()) { tracer.addEvent("failure", attributes, OffsetDateTime.now(), context); } else { tracer.addEvent("threshold_violation", attributes, OffsetDateTime.now(), context); } } if (finalError != null) { String exceptionType; if (finalError instanceof CosmosException) { exceptionType = CosmosException.class.getCanonicalName(); } else { exceptionType = finalError.getClass().getCanonicalName(); } tracer.setAttribute("exception.escaped", Boolean.toString(cosmosCtx.isFailure()), context); tracer.setAttribute("exception.type", exceptionType, context); tracer.setAttribute("exception.message", errorMessage, context); tracer.setAttribute("exception.stacktrace", prettifyCallstack(finalError), context); } if (this.isTransportLevelTracingEnabled()) { traceTransportLevel(cosmosCtx, context); } tracer.setAttribute( "db.cosmosdb.status_code", Integer.toString(cosmosCtx.getStatusCode()), context); tracer.setAttribute( "db.cosmosdb.sub_status_code", Integer.toString(cosmosCtx.getSubStatusCode()), context); tracer.setAttribute( "db.cosmosdb.request_charge", Float.toString(cosmosCtx.getTotalRequestCharge()), context); tracer.setAttribute("db.cosmosdb.request_content_length",cosmosCtx.getMaxRequestPayloadSizeInBytes(), context); tracer.setAttribute("db.cosmosdb.max_response_content_length_bytes",cosmosCtx.getMaxResponsePayloadSizeInBytes(), context); tracer.setAttribute("db.cosmosdb.retry_count",cosmosCtx.getRetryCount() , context); Set<String> regionsContacted = cosmosCtx.getContactedRegionNames(); if (!regionsContacted.isEmpty()) { tracer.setAttribute( "db.cosmosdb.regions_contacted", String.join(", ", regionsContacted), context); } 
tracer.end(errorMessage, finalError, context); } private void recordStoreResponseStatistics( List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics, Context context) { for (ClientSideRequestStatistics.StoreResponseStatistics responseStatistics: storeResponseStatistics) { StoreResultDiagnostics storeResultDiagnostics = responseStatistics.getStoreResult(); StoreResponseDiagnostics storeResponseDiagnostics = storeResultDiagnostics.getStoreResponseDiagnostics(); Map<String, Object> attributes = new HashMap<>(); attributes.put("rntbd.url", storeResultDiagnostics.getStorePhysicalAddressAsString()); attributes.put("rntbd.resource_type", responseStatistics.getRequestResourceType().toString()); attributes.put("rntbd.operation_type", responseStatistics.getRequestOperationType().toString()); attributes.put("rntbd.region", responseStatistics.getRegionName()); if (storeResultDiagnostics.getLsn() > 0) { attributes.put("rntbd.lsn", Long.toString(storeResultDiagnostics.getLsn())); } if (storeResultDiagnostics.getGlobalCommittedLSN() > 0) { attributes.put("rntbd.gclsn", Long.toString(storeResultDiagnostics.getGlobalCommittedLSN())); } String responseSessionToken = responseStatistics.getRequestSessionToken(); if (responseSessionToken != null && !responseSessionToken.isEmpty()) { attributes.put("rntbd.session_token", responseSessionToken); } String requestSessionToken = responseStatistics.getRequestSessionToken(); if (requestSessionToken != null && !requestSessionToken.isEmpty()) { attributes.put("rntbd.request_session_token", requestSessionToken); } String activityId = storeResponseDiagnostics.getActivityId(); if (requestSessionToken != null && !requestSessionToken.isEmpty()) { attributes.put("rntbd.activity_id", activityId); } String pkRangeId = storeResponseDiagnostics.getPartitionKeyRangeId(); if (pkRangeId != null && !pkRangeId.isEmpty()) { attributes.put("rntbd.partition_key_range_id", pkRangeId); } attributes.put("rntbd.status_code", 
Integer.toString(storeResponseDiagnostics.getStatusCode())); if (storeResponseDiagnostics.getSubStatusCode() != 0) { attributes.put("rntbd.sub_status_code", Integer.toString(storeResponseDiagnostics.getSubStatusCode())); } if (storeResponseDiagnostics.getFaultInjectionRuleId() != null) { attributes.put("rntbd.fault_injection_rule_id", storeResponseDiagnostics.getFaultInjectionRuleId()); } Double backendLatency = storeResultDiagnostics.getBackendLatencyInMs(); if (backendLatency != null) { attributes.put("rntbd.backend_latency", Double.toString(backendLatency)); } double requestCharge = storeResponseDiagnostics.getRequestCharge(); attributes.put("rntbd.request_charge", Double.toString(requestCharge)); Duration latency = responseStatistics.getDuration(); if (latency != null) { attributes.put("rntbd.latency", latency.toString()); } if (storeResponseDiagnostics.getRntbdChannelStatistics() != null) { attributes.put( "rntbd.is_new_channel", storeResponseDiagnostics.getRntbdChannelStatistics().isWaitForConnectionInit()); } OffsetDateTime startTime = null; for (RequestTimeline.Event event : storeResponseDiagnostics.getRequestTimeline()) { OffsetDateTime eventTime = event.getStartTime() != null ? event.getStartTime().atOffset(ZoneOffset.UTC) : null; if (eventTime != null && (startTime == null || startTime.isBefore(eventTime))) { startTime = eventTime; } Duration duration = event.getDuration(); if (duration == null || duration == Duration.ZERO) { continue; } attributes.put("rntbd.latency_" + event.getName().toLowerCase(Locale.ROOT), duration.toString()); } attributes.put("rntbd.request_size_bytes",storeResponseDiagnostics.getRequestPayloadLength()); attributes.put("rntbd.response_size_bytes",storeResponseDiagnostics.getResponsePayloadLength()); this.tracer.addEvent( "rntbd.request", attributes, startTime != null ? 
startTime : OffsetDateTime.now(), context); } } private void traceTransportLevelRequests( Collection<ClientSideRequestStatistics> clientSideRequestStatistics, Context context) { if (clientSideRequestStatistics != null) { for (ClientSideRequestStatistics requestStatistics : clientSideRequestStatistics) { recordStoreResponseStatistics( requestStatistics.getResponseStatisticsList(), context); recordStoreResponseStatistics( requestStatistics.getSupplementalResponseStatisticsList(), context); } } } private void traceTransportLevel(CosmosDiagnosticsContext diagnosticsContext, Context context) { Collection<ClientSideRequestStatistics> combinedClientSideRequestStatistics = ctxAccessor.getDistinctCombinedClientSideRequestStatistics(diagnosticsContext); traceTransportLevelRequests( combinedClientSideRequestStatistics, context); } }
net.peer.name here will have the full hostname of the ServiceEndpoint - identifying the account + cloud - so, the Azure resource
private StartSpanOptions startSpanOptions(String spanName, CosmosDiagnosticsContext cosmosCtx) { StartSpanOptions spanOptions = new StartSpanOptions(SpanKind.CLIENT) .setAttribute(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME) .setAttribute("db.system","cosmosdb") .setAttribute("db.operation",spanName) .setAttribute("net.peer.name",cosmosCtx.getAccountName()); String databaseId = cosmosCtx.getDatabaseName(); if (databaseId != null) { spanOptions.setAttribute("db.name", databaseId); } return spanOptions; }
.setAttribute("net.peer.name",cosmosCtx.getAccountName());
private StartSpanOptions startSpanOptions(String spanName, CosmosDiagnosticsContext cosmosCtx) { StartSpanOptions spanOptions; if (tracer instanceof EnabledNoOpTracer) { spanOptions = new StartSpanOptions(SpanKind.INTERNAL); } else { spanOptions = new StartSpanOptions(SpanKind.INTERNAL) .setAttribute("db.system", "cosmosdb") .setAttribute("db.operation", spanName) .setAttribute("net.peer.name", cosmosCtx.getAccountName()) .setAttribute("db.cosmosdb.operation_type",cosmosCtx.getOperationType()) .setAttribute("db.cosmosdb.resource_type",cosmosCtx.getResourceType()) .setAttribute("db.name", cosmosCtx.getDatabaseName()) .setAttribute("db.cosmosdb.client_id", this.clientId) .setAttribute("user_agent.original", this.userAgent) .setAttribute("db.cosmosdb.connection_mode", this.connectionMode); if (!cosmosCtx.getOperationId().isEmpty() && !cosmosCtx.getOperationId().equals(ctxAccessor.getSpanName(cosmosCtx))) { spanOptions.setAttribute("db.cosmosdb.operation_id", cosmosCtx.getOperationId()); } String containerName = cosmosCtx.getContainerName(); if (containerName != null) { spanOptions.setAttribute("db.cosmosdb.container", containerName); } } return spanOptions; }
class OpenTelemetryCosmosTracer implements CosmosTracer { private final Tracer tracer; public OpenTelemetryCosmosTracer(Tracer tracer) { checkNotNull(tracer, "Argument 'tracer' must not be null."); this.tracer = tracer; } @Override public Context startSpan(String spanName, CosmosDiagnosticsContext cosmosCtx, Context context) { checkNotNull(spanName, "Argument 'spanName' must not be null."); checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null."); Context local = Objects .requireNonNull(context, "'context' cannot be null.") .addData(COSMOS_DIAGNOSTICS_CONTEXT_KEY, cosmosCtx); StartSpanOptions spanOptions = this.startSpanOptions( spanName, cosmosCtx); return tracer.start(spanName, spanOptions, local); } @Override public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context) { String errorMessage = null; if (cosmosCtx == null) { return; } if (!cosmosCtx.hasCompleted()) { tracer.end("CosmosCtx not completed yet.", null, context); } if (cosmosCtx.isFailure() || cosmosCtx.isThresholdViolated()) { tracer.end(errorMessage, finalError, context); Map<String, Object> attributes = new HashMap<>(); } Throwable finalError = cosmosCtx.getFinalError(); if (finalError != null && cosmosCtx.isFailure()) { if (finalError instanceof CosmosException) { CosmosException cosmosException = (CosmosException)finalError; errorMessage = cosmosException.getMessageWithoutDiagnostics(); } else { errorMessage = finalError.getMessage(); } tracer.setAttribute("exception.type", finalError.getClass().getCanonicalName(), context); tracer.setAttribute("exception.message", errorMessage, context); StringWriter stackWriter = new StringWriter(); PrintWriter printWriter = new PrintWriter(stackWriter); finalError.printStackTrace(printWriter); printWriter.flush(); stackWriter.flush(); tracer.setAttribute("exception.stacktrace", stackWriter.toString(), context); printWriter.close(); try { stackWriter.close(); } catch (IOException e) { LOGGER.warn("Error trying to close StringWriter.", e); } 
} tracer.setAttribute("db.cosmosdb.operation_type",cosmosCtx.getOperationType(), context); tracer.setAttribute("db.cosmosdb.container",cosmosCtx.getContainerName(), context); tracer.setAttribute( "db.cosmosdb.status_code", Integer.toString(cosmosCtx.getStatusCode()), context); tracer.setAttribute( "db.cosmosdb.sub_status_code", Integer.toString(cosmosCtx.getSubStatusCode()), context); tracer.setAttribute( "db.cosmosdb.request_charge", Float.toString(cosmosCtx.getTotalRequestCharge()), context); tracer.setAttribute("db.cosmosdb.max_request_content_length",cosmosCtx.getMaxRequestPayloadSizeInBytes(), context); tracer.setAttribute("db.cosmosdb.max_response_content_length_bytes",cosmosCtx.getMaxResponsePayloadSizeInBytes(), context); tracer.setAttribute("db.cosmosdb.retry_count",cosmosCtx.getRetryCount() , context); Set<String> regionsContacted = cosmosCtx.getContactedRegionNames(); if (regionsContacted != null && !regionsContacted.isEmpty()) { tracer.setAttribute( "db.cosmosdb.regions_contacted", String.join(", ", regionsContacted), context); } tracer.end(errorMessage, finalError, context); } }
class OpenTelemetryCosmosTracer implements CosmosTracer { private final Tracer tracer; private final CosmosClientTelemetryConfig config; private final String clientId; private final String connectionMode; private final String userAgent; public OpenTelemetryCosmosTracer( Tracer tracer, CosmosClientTelemetryConfig config, String clientId, String userAgent, String connectionMode) { checkNotNull(tracer, "Argument 'tracer' must not be null."); checkNotNull(config, "Argument 'config' must not be null."); checkNotNull(clientId, "Argument 'clientId' must not be null."); checkNotNull(userAgent, "Argument 'userAgent' must not be null."); checkNotNull(connectionMode, "Argument 'connectionMode' must not be null."); this.tracer = tracer; this.config = config; this.clientId = clientId; this.userAgent = userAgent; this.connectionMode = connectionMode; } private boolean isTransportLevelTracingEnabled() { return clientTelemetryConfigAccessor.isTransportLevelTracingEnabled(this.config); } @Override public Context startSpan(String spanName, CosmosDiagnosticsContext cosmosCtx, Context context) { checkNotNull(spanName, "Argument 'spanName' must not be null."); checkNotNull(cosmosCtx, "Argument 'cosmosCtx' must not be null."); Context local = Objects .requireNonNull(context, "'context' cannot be null.") .addData(COSMOS_DIAGNOSTICS_CONTEXT_KEY, cosmosCtx); StartSpanOptions spanOptions = this.startSpanOptions( spanName, cosmosCtx); return tracer.start(spanName, spanOptions, local); } @Override public void endSpan(CosmosDiagnosticsContext cosmosCtx, Context context) { if (cosmosCtx == null) { return; } if (!cosmosCtx.isCompleted()) { tracer.end("CosmosCtx not completed yet.", null, context); return; } String errorMessage = null; Throwable finalError = cosmosCtx.getFinalError(); if (finalError != null && cosmosCtx.isFailure()) { if (finalError instanceof CosmosException) { CosmosException cosmosException = (CosmosException) finalError; errorMessage = cosmosException.getShortMessage(); } 
else { errorMessage = finalError.getMessage(); } } if (tracer instanceof EnabledNoOpTracer) { tracer.end(errorMessage, finalError, context); return; } if (cosmosCtx.isFailure() || cosmosCtx.isThresholdViolated()) { Map<String, Object> attributes = new HashMap<>(); attributes.put("Diagnostics", cosmosCtx.toJson()); if (cosmosCtx.isFailure()) { tracer.addEvent("failure", attributes, OffsetDateTime.now(), context); } else { tracer.addEvent("threshold_violation", attributes, OffsetDateTime.now(), context); } } if (finalError != null) { String exceptionType; if (finalError instanceof CosmosException) { exceptionType = CosmosException.class.getCanonicalName(); } else { exceptionType = finalError.getClass().getCanonicalName(); } tracer.setAttribute("exception.escaped", Boolean.toString(cosmosCtx.isFailure()), context); tracer.setAttribute("exception.type", exceptionType, context); tracer.setAttribute("exception.message", errorMessage, context); tracer.setAttribute("exception.stacktrace", prettifyCallstack(finalError), context); } if (this.isTransportLevelTracingEnabled()) { traceTransportLevel(cosmosCtx, context); } tracer.setAttribute( "db.cosmosdb.status_code", Integer.toString(cosmosCtx.getStatusCode()), context); tracer.setAttribute( "db.cosmosdb.sub_status_code", Integer.toString(cosmosCtx.getSubStatusCode()), context); tracer.setAttribute( "db.cosmosdb.request_charge", Float.toString(cosmosCtx.getTotalRequestCharge()), context); tracer.setAttribute("db.cosmosdb.request_content_length",cosmosCtx.getMaxRequestPayloadSizeInBytes(), context); tracer.setAttribute("db.cosmosdb.max_response_content_length_bytes",cosmosCtx.getMaxResponsePayloadSizeInBytes(), context); tracer.setAttribute("db.cosmosdb.retry_count",cosmosCtx.getRetryCount() , context); Set<String> regionsContacted = cosmosCtx.getContactedRegionNames(); if (!regionsContacted.isEmpty()) { tracer.setAttribute( "db.cosmosdb.regions_contacted", String.join(", ", regionsContacted), context); } 
tracer.end(errorMessage, finalError, context); } private void recordStoreResponseStatistics( List<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics, Context context) { for (ClientSideRequestStatistics.StoreResponseStatistics responseStatistics: storeResponseStatistics) { StoreResultDiagnostics storeResultDiagnostics = responseStatistics.getStoreResult(); StoreResponseDiagnostics storeResponseDiagnostics = storeResultDiagnostics.getStoreResponseDiagnostics(); Map<String, Object> attributes = new HashMap<>(); attributes.put("rntbd.url", storeResultDiagnostics.getStorePhysicalAddressAsString()); attributes.put("rntbd.resource_type", responseStatistics.getRequestResourceType().toString()); attributes.put("rntbd.operation_type", responseStatistics.getRequestOperationType().toString()); attributes.put("rntbd.region", responseStatistics.getRegionName()); if (storeResultDiagnostics.getLsn() > 0) { attributes.put("rntbd.lsn", Long.toString(storeResultDiagnostics.getLsn())); } if (storeResultDiagnostics.getGlobalCommittedLSN() > 0) { attributes.put("rntbd.gclsn", Long.toString(storeResultDiagnostics.getGlobalCommittedLSN())); } String responseSessionToken = responseStatistics.getRequestSessionToken(); if (responseSessionToken != null && !responseSessionToken.isEmpty()) { attributes.put("rntbd.session_token", responseSessionToken); } String requestSessionToken = responseStatistics.getRequestSessionToken(); if (requestSessionToken != null && !requestSessionToken.isEmpty()) { attributes.put("rntbd.request_session_token", requestSessionToken); } String activityId = storeResponseDiagnostics.getActivityId(); if (requestSessionToken != null && !requestSessionToken.isEmpty()) { attributes.put("rntbd.activity_id", activityId); } String pkRangeId = storeResponseDiagnostics.getPartitionKeyRangeId(); if (pkRangeId != null && !pkRangeId.isEmpty()) { attributes.put("rntbd.partition_key_range_id", pkRangeId); } attributes.put("rntbd.status_code", 
Integer.toString(storeResponseDiagnostics.getStatusCode())); if (storeResponseDiagnostics.getSubStatusCode() != 0) { attributes.put("rntbd.sub_status_code", Integer.toString(storeResponseDiagnostics.getSubStatusCode())); } if (storeResponseDiagnostics.getFaultInjectionRuleId() != null) { attributes.put("rntbd.fault_injection_rule_id", storeResponseDiagnostics.getFaultInjectionRuleId()); } Double backendLatency = storeResultDiagnostics.getBackendLatencyInMs(); if (backendLatency != null) { attributes.put("rntbd.backend_latency", Double.toString(backendLatency)); } double requestCharge = storeResponseDiagnostics.getRequestCharge(); attributes.put("rntbd.request_charge", Double.toString(requestCharge)); Duration latency = responseStatistics.getDuration(); if (latency != null) { attributes.put("rntbd.latency", latency.toString()); } if (storeResponseDiagnostics.getRntbdChannelStatistics() != null) { attributes.put( "rntbd.is_new_channel", storeResponseDiagnostics.getRntbdChannelStatistics().isWaitForConnectionInit()); } OffsetDateTime startTime = null; for (RequestTimeline.Event event : storeResponseDiagnostics.getRequestTimeline()) { OffsetDateTime eventTime = event.getStartTime() != null ? event.getStartTime().atOffset(ZoneOffset.UTC) : null; if (eventTime != null && (startTime == null || startTime.isBefore(eventTime))) { startTime = eventTime; } Duration duration = event.getDuration(); if (duration == null || duration == Duration.ZERO) { continue; } attributes.put("rntbd.latency_" + event.getName().toLowerCase(Locale.ROOT), duration.toString()); } attributes.put("rntbd.request_size_bytes",storeResponseDiagnostics.getRequestPayloadLength()); attributes.put("rntbd.response_size_bytes",storeResponseDiagnostics.getResponsePayloadLength()); this.tracer.addEvent( "rntbd.request", attributes, startTime != null ? 
startTime : OffsetDateTime.now(), context); } } private void traceTransportLevelRequests( Collection<ClientSideRequestStatistics> clientSideRequestStatistics, Context context) { if (clientSideRequestStatistics != null) { for (ClientSideRequestStatistics requestStatistics : clientSideRequestStatistics) { recordStoreResponseStatistics( requestStatistics.getResponseStatisticsList(), context); recordStoreResponseStatistics( requestStatistics.getSupplementalResponseStatisticsList(), context); } } } private void traceTransportLevel(CosmosDiagnosticsContext diagnosticsContext, Context context) { Collection<ClientSideRequestStatistics> combinedClientSideRequestStatistics = ctxAccessor.getDistinctCombinedClientSideRequestStatistics(diagnosticsContext); traceTransportLevelRequests( combinedClientSideRequestStatistics, context); } }
What does this mean `TARGET_PROTOCOL.contains(key)`?
protected void setCustomHeaders(MessageHeaders headers, EventData azureMessage) { super.setCustomHeaders(headers, azureMessage); Set<String> ignoredHeaders = new HashSet<>(); headers.forEach((key, value) -> { if (IGNORED_SPRING_MESSAGE_HEADERS.contains(key)) { ignoredHeaders.add(key); } else if (TARGET_PROTOCOL.contains(key)) { azureMessage.getProperties().put(key, "amqp"); } else { azureMessage.getProperties().put(key, value.toString()); } }); ignoredHeaders.forEach(header -> LOGGER.info("Message headers {} is not supported to be set and will be " + "ignored.", header)); }
} else if (TARGET_PROTOCOL.contains(key)) {
protected void setCustomHeaders(MessageHeaders headers, EventData azureMessage) { super.setCustomHeaders(headers, azureMessage); Set<String> ignoredHeaders = new HashSet<>(); headers.forEach((key, value) -> { if (IGNORED_SPRING_MESSAGE_HEADERS.contains(key)) { ignoredHeaders.add(key); } else { azureMessage.getProperties().put(key, value.toString()); } }); if (azureMessage.getProperties().containsKey(TARGET_PROTOCOL)) { azureMessage.getProperties().put(TARGET_PROTOCOL, "amqp"); } ignoredHeaders.forEach(header -> LOGGER.info("Message headers {} is not supported to be set and will be " + "ignored.", header)); }
class EventHubsMessageConverter extends AbstractAzureMessageConverter<EventData, EventData> { private static final Logger LOGGER = LoggerFactory.getLogger(EventHubsMessageConverter.class); static final String TARGET_PROTOCOL = "target-protocol"; private static final Set<String> IGNORED_SPRING_MESSAGE_HEADERS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( AzureHeaders.PARTITION_KEY, AzureHeaders.BATCH_CONVERTED_PARTITION_KEY, EventHubsHeaders.ENQUEUED_TIME, EventHubsHeaders.BATCH_CONVERTED_ENQUEUED_TIME, EventHubsHeaders.OFFSET, EventHubsHeaders.BATCH_CONVERTED_OFFSET, EventHubsHeaders.SEQUENCE_NUMBER, EventHubsHeaders.BATCH_CONVERTED_SEQUENCE_NUMBER, EventHubsHeaders.BATCH_CONVERTED_SYSTEM_PROPERTIES, EventHubsHeaders.BATCH_CONVERTED_APPLICATION_PROPERTIES ))); private final ObjectMapper objectMapper; /** * Construct the message converter with default {@code ObjectMapper}. */ public EventHubsMessageConverter() { this(OBJECT_MAPPER); } /** * Construct the message converter with customized {@code ObjectMapper}. * @param objectMapper the object mapper. 
*/ public EventHubsMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(EventData azureMessage) { return azureMessage.getBody(); } @Override protected EventData fromString(String payload) { return new EventData(payload.getBytes(StandardCharsets.UTF_8)); } @Override protected EventData fromByte(byte[] payload) { return new EventData(payload); } @Override @Override protected Map<String, Object> buildCustomHeaders(EventData azureMessage) { Map<String, Object> headers = super.buildCustomHeaders(azureMessage); headers.putAll(getSystemProperties(azureMessage)); headers.putAll(azureMessage.getProperties()); return headers; } private Map<String, Object> getSystemProperties(EventData azureMessage) { Map<String, Object> result = new HashMap<>(azureMessage.getSystemProperties()); result.put(EventHubsHeaders.ENQUEUED_TIME, azureMessage.getEnqueuedTime()); result.put(EventHubsHeaders.OFFSET, azureMessage.getOffset()); result.put(EventHubsHeaders.SEQUENCE_NUMBER, azureMessage.getSequenceNumber()); result.put(AzureHeaders.PARTITION_KEY, azureMessage.getPartitionKey()); return result; } }
class EventHubsMessageConverter extends AbstractAzureMessageConverter<EventData, EventData> { private static final Logger LOGGER = LoggerFactory.getLogger(EventHubsMessageConverter.class); static final String TARGET_PROTOCOL = "target-protocol"; private static final Set<String> IGNORED_SPRING_MESSAGE_HEADERS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( AzureHeaders.PARTITION_KEY, AzureHeaders.BATCH_CONVERTED_PARTITION_KEY, EventHubsHeaders.ENQUEUED_TIME, EventHubsHeaders.BATCH_CONVERTED_ENQUEUED_TIME, EventHubsHeaders.OFFSET, EventHubsHeaders.BATCH_CONVERTED_OFFSET, EventHubsHeaders.SEQUENCE_NUMBER, EventHubsHeaders.BATCH_CONVERTED_SEQUENCE_NUMBER, EventHubsHeaders.BATCH_CONVERTED_SYSTEM_PROPERTIES, EventHubsHeaders.BATCH_CONVERTED_APPLICATION_PROPERTIES ))); private final ObjectMapper objectMapper; /** * Construct the message converter with default {@code ObjectMapper}. */ public EventHubsMessageConverter() { this(OBJECT_MAPPER); } /** * Construct the message converter with customized {@code ObjectMapper}. * @param objectMapper the object mapper. 
*/ public EventHubsMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(EventData azureMessage) { return azureMessage.getBody(); } @Override protected EventData fromString(String payload) { return new EventData(payload.getBytes(StandardCharsets.UTF_8)); } @Override protected EventData fromByte(byte[] payload) { return new EventData(payload); } @Override @Override protected Map<String, Object> buildCustomHeaders(EventData azureMessage) { Map<String, Object> headers = super.buildCustomHeaders(azureMessage); headers.putAll(getSystemProperties(azureMessage)); headers.putAll(azureMessage.getProperties()); return headers; } private Map<String, Object> getSystemProperties(EventData azureMessage) { Map<String, Object> result = new HashMap<>(azureMessage.getSystemProperties()); result.put(EventHubsHeaders.ENQUEUED_TIME, azureMessage.getEnqueuedTime()); result.put(EventHubsHeaders.OFFSET, azureMessage.getOffset()); result.put(EventHubsHeaders.SEQUENCE_NUMBER, azureMessage.getSequenceNumber()); result.put(AzureHeaders.PARTITION_KEY, azureMessage.getPartitionKey()); return result; } }
Do we need to verify the right exception info/message is in the exception (here and below) or just verifying getting the exception is ok and standard practice for the SDK tests?
public void openConnectionsAndInitCachesWithInvalidCosmosClientConfig(List<String> preferredRegions, int numProactiveConnectionRegions, int numContainers) { CosmosAsyncClient clientWithOpenConnections = null; List<CosmosAsyncContainer> asyncContainers = new ArrayList<>(); List<CosmosContainerIdentity> cosmosContainerIdentities = new ArrayList<>(); for (int i = 1; i <= numContainers; i++) { String containerId = String.format("id%d", i); cosmosAsyncDatabase.createContainerIfNotExists(containerId, "/mypk").block(); asyncContainers.add(cosmosAsyncDatabase.getContainer(containerId)); cosmosContainerIdentities.add(new CosmosContainerIdentity(cosmosAsyncDatabase.getId(), containerId)); } if (numProactiveConnectionRegions > 5) { try { CosmosContainerProactiveInitConfig proactiveContainerInitConfig = new CosmosContainerProactiveInitConfigBuilder(cosmosContainerIdentities) .setProactiveConnectionRegions(numProactiveConnectionRegions) .build(); fail("Should have thrown exception"); } catch (IllegalArgumentException illegalArgEx) {} } else { CosmosContainerProactiveInitConfig proactiveContainerInitConfig = new CosmosContainerProactiveInitConfigBuilder(cosmosContainerIdentities) .setProactiveConnectionRegions(numProactiveConnectionRegions) .build(); try { clientWithOpenConnections = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .endpointDiscoveryEnabled(true) .preferredRegions(preferredRegions) .openConnectionsAndInitCaches(proactiveContainerInitConfig) .directMode() .buildAsyncClient(); fail("Should have thrown exception"); } catch (IllegalArgumentException illegalArgEx) {} } }
} catch (IllegalArgumentException illegalArgEx) {}
public void openConnectionsAndInitCachesWithInvalidCosmosClientConfig(List<String> preferredRegions, int numProactiveConnectionRegions, int numContainers) { CosmosAsyncClient clientWithOpenConnections = null; List<CosmosAsyncContainer> asyncContainers = new ArrayList<>(); List<CosmosContainerIdentity> cosmosContainerIdentities = new ArrayList<>(); for (int i = 1; i <= numContainers; i++) { String containerId = String.format("id%d", i); cosmosAsyncDatabase.createContainerIfNotExists(containerId, "/mypk").block(); asyncContainers.add(cosmosAsyncDatabase.getContainer(containerId)); cosmosContainerIdentities.add(new CosmosContainerIdentity(cosmosAsyncDatabase.getId(), containerId)); } if (numProactiveConnectionRegions > 5) { try { CosmosContainerProactiveInitConfig proactiveContainerInitConfig = new CosmosContainerProactiveInitConfigBuilder(cosmosContainerIdentities) .setProactiveConnectionRegions(numProactiveConnectionRegions) .build(); fail("Should have thrown exception"); } catch (IllegalArgumentException illegalArgEx) {} } else { CosmosContainerProactiveInitConfig proactiveContainerInitConfig = new CosmosContainerProactiveInitConfigBuilder(cosmosContainerIdentities) .setProactiveConnectionRegions(numProactiveConnectionRegions) .build(); try { clientWithOpenConnections = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .endpointDiscoveryEnabled(true) .preferredRegions(preferredRegions) .openConnectionsAndInitCaches(proactiveContainerInitConfig) .directMode() .buildAsyncClient(); fail("Should have thrown exception"); } catch (IllegalArgumentException illegalArgEx) {} } }
class ProactiveConnectionManagementTest extends TestSuiteBase { private CosmosClientBuilder clientBuilder; private DatabaseAccount databaseAccount; private CosmosAsyncDatabase cosmosAsyncDatabase; @BeforeClass(groups = {"multi-region"}) public void beforeClass() { clientBuilder = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode(); CosmosAsyncClient dummyClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode().buildAsyncClient(); this.cosmosAsyncDatabase = getSharedCosmosDatabase(dummyClient); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(dummyClient); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); this.databaseAccount = globalEndpointManager.getLatestDatabaseAccount(); safeClose(dummyClient); } @Test(groups = {"multi-region"}, dataProvider = "invalidProactiveContainerInitConfigs") @Test(groups = {"multi-region"}, dataProvider = "proactiveContainerInitConfigs") public void openConnectionsAndInitCachesWithCosmosClient(List<String> preferredRegions, int numProactiveConnectionRegions, int numContainers) { CosmosAsyncClient clientWithOpenConnections = null; try { List<CosmosAsyncContainer> asyncContainers = new ArrayList<>(); List<CosmosContainerIdentity> cosmosContainerIdentities = new ArrayList<>(); for (int i = 1; i <= numContainers; i++) { String containerId = String.format("id%d", i); cosmosAsyncDatabase.createContainerIfNotExists(containerId, "/mypk").block(); asyncContainers.add(cosmosAsyncDatabase.getContainer(containerId)); cosmosContainerIdentities.add(new CosmosContainerIdentity(cosmosAsyncDatabase.getId(), containerId)); } CosmosContainerProactiveInitConfig proactiveContainerInitConfig = 
new CosmosContainerProactiveInitConfigBuilder(cosmosContainerIdentities) .setProactiveConnectionRegions(numProactiveConnectionRegions) .build(); clientWithOpenConnections = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .endpointDiscoveryEnabled(true) .preferredRegions(preferredRegions) .openConnectionsAndInitCaches(proactiveContainerInitConfig) .directMode() .buildAsyncClient(); RntbdTransportClient rntbdTransportClient = (RntbdTransportClient) ReflectionUtils.getTransportClient(clientWithOpenConnections); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(clientWithOpenConnections); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; GlobalAddressResolver globalAddressResolver = ReflectionUtils.getGlobalAddressResolver(rxDocumentClient); GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); RntbdEndpoint.Provider provider = ReflectionUtils.getRntbdEndpointProvider(rntbdTransportClient); ConcurrentHashMap<String, ?> routingMap = getRoutingMap(rxDocumentClient); ConcurrentHashMap<String, ?> collectionInfoByNameMap = getCollectionInfoByNameMap(rxDocumentClient); Set<String> endpoints = ConcurrentHashMap.newKeySet(); UnmodifiableList<URI> readEndpoints = globalEndpointManager.getReadEndpoints(); List<URI> proactiveConnectionEndpoints = readEndpoints.subList( 0, Math.min(readEndpoints.size(), proactiveContainerInitConfig.getNumProactiveConnectionRegions())); Flux<CosmosAsyncContainer> asyncContainerFlux = Flux.fromIterable(asyncContainers); Flux<Utils.ValueHolder<List<PartitionKeyRange>>> partitionKeyRangeFlux = Flux.fromIterable(asyncContainers) .flatMap(CosmosAsyncContainer::read) .flatMap(containerResponse -> rxDocumentClient .getPartitionKeyRangeCache() .tryGetOverlappingRangesAsync( null, containerResponse.getProperties().getResourceId(), PartitionKeyInternalHelper.FullRange, false, null)); for (URI 
proactiveConnectionEndpoint : proactiveConnectionEndpoints) { Flux.zip(asyncContainerFlux, partitionKeyRangeFlux) .flatMapIterable(containerToPartitionKeyRanges -> { List<ImmutablePair<PartitionKeyRange, CosmosAsyncContainer>> pkrToContainer = new ArrayList<>(); for (PartitionKeyRange pkr : containerToPartitionKeyRanges.getT2().v) { pkrToContainer.add(new ImmutablePair<>(pkr, containerToPartitionKeyRanges.getT1())); } return pkrToContainer; }) .flatMap(partitionKeyRangeToContainer -> { RxDocumentServiceRequest dummyRequest = RxDocumentServiceRequest.createFromName( mockDiagnosticsClientContext(), OperationType.Read, partitionKeyRangeToContainer.getRight().getLink() + "/docId", ResourceType.Document); dummyRequest.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(partitionKeyRangeToContainer.getLeft().getId())); return globalAddressResolver.resolveAsync(dummyRequest, false); }) .delayElements(Duration.ofSeconds(3)) .doOnNext(addressInformations -> { for (AddressInformation address : addressInformations) { endpoints.add(address.getPhysicalUri().getURI().getAuthority()); } }) .blockLast(); globalEndpointManager.markEndpointUnavailableForRead(proactiveConnectionEndpoint); } assertThat(provider.count()).isEqualTo(endpoints.size()); assertThat(collectionInfoByNameMap.size()).isEqualTo(cosmosContainerIdentities.size()); assertThat(routingMap.size()).isEqualTo(cosmosContainerIdentities.size()); for (CosmosAsyncContainer asyncContainer : asyncContainers) { asyncContainer.delete().block(); } } finally { safeClose(clientWithOpenConnections); } } @Test(groups = {"multi-region"}, dataProvider = "proactiveContainerInitConfigs") public void openConnectionsAndInitCachesWithContainer(List<String> preferredRegions, int numProactiveConnectionRegions, int ignore) { CosmosAsyncClient asyncClient = null; try { asyncClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .endpointDiscoveryEnabled(true) 
.preferredRegions(preferredRegions) .directMode() .buildAsyncClient(); cosmosAsyncDatabase = getSharedCosmosDatabase(asyncClient); List<CosmosContainerIdentity> cosmosContainerIdentities = new ArrayList<>(); String containerId = "id1"; cosmosAsyncDatabase.createContainerIfNotExists(containerId, "/mypk").block(); CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(containerId); cosmosContainerIdentities.add(new CosmosContainerIdentity(cosmosAsyncDatabase.getId(), containerId)); CosmosContainerProactiveInitConfig proactiveContainerInitConfig = new CosmosContainerProactiveInitConfigBuilder(cosmosContainerIdentities) .setProactiveConnectionRegions(numProactiveConnectionRegions) .build(); RntbdTransportClient rntbdTransportClient = (RntbdTransportClient) ReflectionUtils.getTransportClient(asyncClient); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(asyncClient); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; GlobalAddressResolver globalAddressResolver = ReflectionUtils.getGlobalAddressResolver(rxDocumentClient); GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); RntbdEndpoint.Provider provider = ReflectionUtils.getRntbdEndpointProvider(rntbdTransportClient); ConcurrentHashMap<String, ?> routingMap = getRoutingMap(rxDocumentClient); ConcurrentHashMap<String, ?> collectionInfoByNameMap = getCollectionInfoByNameMap(rxDocumentClient); Set<String> endpoints = ConcurrentHashMap.newKeySet(); cosmosAsyncContainer.openConnectionsAndInitCaches(numProactiveConnectionRegions).block(); UnmodifiableList<URI> readEndpoints = globalEndpointManager.getReadEndpoints(); List<URI> proactiveConnectionEndpoints = readEndpoints.subList( 0, Math.min(readEndpoints.size(),proactiveContainerInitConfig.getNumProactiveConnectionRegions())); Mono<CosmosAsyncContainer> asyncContainerMono = Mono.just(cosmosAsyncContainer); 
Mono<Utils.ValueHolder<List<PartitionKeyRange>>> partitionKeyRangeMono = Mono.just(cosmosAsyncContainer) .flatMap(CosmosAsyncContainer::read) .flatMap(containerResponse -> rxDocumentClient .getPartitionKeyRangeCache() .tryGetOverlappingRangesAsync( null, containerResponse.getProperties().getResourceId(), PartitionKeyInternalHelper.FullRange, false, null)); for (URI proactiveConnectionEndpoint : proactiveConnectionEndpoints) { Mono.zip(asyncContainerMono, partitionKeyRangeMono) .flatMapIterable(containerToPartitionKeyRanges -> { List<ImmutablePair<PartitionKeyRange, CosmosAsyncContainer>> pkrToContainer = new ArrayList<>(); for (PartitionKeyRange pkr : containerToPartitionKeyRanges.getT2().v) { pkrToContainer.add(new ImmutablePair<>(pkr, containerToPartitionKeyRanges.getT1())); } return pkrToContainer; }) .flatMap(partitionKeyRangeToContainer -> { RxDocumentServiceRequest dummyRequest = RxDocumentServiceRequest.createFromName( mockDiagnosticsClientContext(), OperationType.Read, partitionKeyRangeToContainer.getRight().getLink() + "/docId", ResourceType.Document); dummyRequest.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(partitionKeyRangeToContainer.getLeft().getId())); return globalAddressResolver.resolveAsync(dummyRequest, false); }) .delayElements(Duration.ofSeconds(3)) .doOnNext(addressInformations -> { for (AddressInformation address : addressInformations) { endpoints.add(address.getPhysicalUri().getURI().getAuthority()); } }) .blockLast(); globalEndpointManager.markEndpointUnavailableForRead(proactiveConnectionEndpoint); } assertThat(provider.count()).isEqualTo(endpoints.size()); assertThat(collectionInfoByNameMap.size()).isEqualTo(cosmosContainerIdentities.size()); assertThat(routingMap.size()).isEqualTo(cosmosContainerIdentities.size()); cosmosAsyncContainer.delete().block(); } finally { safeClose(asyncClient); } } @DataProvider(name = "proactiveContainerInitConfigs") private Object[][] proactiveContainerInitConfigs() { 
Iterator<DatabaseAccountLocation> locationIterator = this.databaseAccount.getReadableLocations().iterator(); List<String> preferredLocations = new ArrayList<>(); while (locationIterator.hasNext()) { DatabaseAccountLocation accountLocation = locationIterator.next(); preferredLocations.add(accountLocation.getName()); preferredLocations.add("EastUS"); } return new Object[][] { new Object[]{preferredLocations, 1, 3}, new Object[]{preferredLocations, 2, 1} }; } @DataProvider(name = "invalidProactiveContainerInitConfigs") private Object[][] invalidProactiveContainerInitConfigs() { Iterator<DatabaseAccountLocation> locationIterator = this.databaseAccount.getReadableLocations().iterator(); List<String> preferredLocations = new ArrayList<>(); while (locationIterator.hasNext()) { DatabaseAccountLocation accountLocation = locationIterator.next(); preferredLocations.add(accountLocation.getName()); } return new Object[][] { new Object[]{preferredLocations, 2, 1}, new Object[]{ Collections.unmodifiableList( Arrays.asList("R1", "R2", "R3", "R4", "R5", "R6")), 6, 1 } }; } private ConcurrentHashMap<String, ?> getCollectionInfoByNameMap(RxDocumentClientImpl rxDocumentClient) { RxClientCollectionCache collectionCache = ReflectionUtils.getClientCollectionCache(rxDocumentClient); AsyncCache<String, DocumentCollection> collectionInfoByNameCache = ReflectionUtils.getCollectionInfoByNameCache(collectionCache); return ReflectionUtils.getValueMap(collectionInfoByNameCache); } private ConcurrentHashMap<String, ?> getRoutingMap(RxDocumentClientImpl rxDocumentClient) { RxPartitionKeyRangeCache partitionKeyRangeCache = ReflectionUtils.getPartitionKeyRangeCache(rxDocumentClient); AsyncCacheNonBlocking<String, CollectionRoutingMap> routingMapAsyncCache = ReflectionUtils.getRoutingMapAsyncCacheNonBlocking(partitionKeyRangeCache); return ReflectionUtils.getValueMapNonBlockingCache(routingMapAsyncCache); } }
class ProactiveConnectionManagementTest extends TestSuiteBase { private CosmosClientBuilder clientBuilder; private DatabaseAccount databaseAccount; private CosmosAsyncDatabase cosmosAsyncDatabase; @BeforeClass(groups = {"multi-region"}) public void beforeClass() { clientBuilder = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode(); CosmosAsyncClient dummyClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode().buildAsyncClient(); this.cosmosAsyncDatabase = getSharedCosmosDatabase(dummyClient); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(dummyClient); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); this.databaseAccount = globalEndpointManager.getLatestDatabaseAccount(); safeClose(dummyClient); } @Test(groups = {"multi-region"}, dataProvider = "invalidProactiveContainerInitConfigs") @Test(groups = {"multi-region"}, dataProvider = "proactiveContainerInitConfigs") public void openConnectionsAndInitCachesWithCosmosClient(List<String> preferredRegions, int numProactiveConnectionRegions, int numContainers) { CosmosAsyncClient clientWithOpenConnections = null; try { List<CosmosAsyncContainer> asyncContainers = new ArrayList<>(); List<CosmosContainerIdentity> cosmosContainerIdentities = new ArrayList<>(); for (int i = 1; i <= numContainers; i++) { String containerId = String.format("id%d", i); cosmosAsyncDatabase.createContainerIfNotExists(containerId, "/mypk").block(); asyncContainers.add(cosmosAsyncDatabase.getContainer(containerId)); cosmosContainerIdentities.add(new CosmosContainerIdentity(cosmosAsyncDatabase.getId(), containerId)); } CosmosContainerProactiveInitConfig proactiveContainerInitConfig = 
new CosmosContainerProactiveInitConfigBuilder(cosmosContainerIdentities) .setProactiveConnectionRegions(numProactiveConnectionRegions) .build(); clientWithOpenConnections = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .endpointDiscoveryEnabled(true) .preferredRegions(preferredRegions) .openConnectionsAndInitCaches(proactiveContainerInitConfig) .directMode() .buildAsyncClient(); RntbdTransportClient rntbdTransportClient = (RntbdTransportClient) ReflectionUtils.getTransportClient(clientWithOpenConnections); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(clientWithOpenConnections); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; GlobalAddressResolver globalAddressResolver = ReflectionUtils.getGlobalAddressResolver(rxDocumentClient); GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); RntbdEndpoint.Provider provider = ReflectionUtils.getRntbdEndpointProvider(rntbdTransportClient); ConcurrentHashMap<String, ?> routingMap = getRoutingMap(rxDocumentClient); ConcurrentHashMap<String, ?> collectionInfoByNameMap = getCollectionInfoByNameMap(rxDocumentClient); Set<String> endpoints = ConcurrentHashMap.newKeySet(); UnmodifiableList<URI> readEndpoints = globalEndpointManager.getReadEndpoints(); List<URI> proactiveConnectionEndpoints = readEndpoints.subList( 0, Math.min(readEndpoints.size(), proactiveContainerInitConfig.getNumProactiveConnectionRegions())); Flux<CosmosAsyncContainer> asyncContainerFlux = Flux.fromIterable(asyncContainers); Flux<Utils.ValueHolder<List<PartitionKeyRange>>> partitionKeyRangeFlux = Flux.fromIterable(asyncContainers) .flatMap(CosmosAsyncContainer::read) .flatMap(containerResponse -> rxDocumentClient .getPartitionKeyRangeCache() .tryGetOverlappingRangesAsync( null, containerResponse.getProperties().getResourceId(), PartitionKeyInternalHelper.FullRange, false, null)); for (URI 
proactiveConnectionEndpoint : proactiveConnectionEndpoints) { Flux.zip(asyncContainerFlux, partitionKeyRangeFlux) .flatMapIterable(containerToPartitionKeyRanges -> { List<ImmutablePair<PartitionKeyRange, CosmosAsyncContainer>> pkrToContainer = new ArrayList<>(); for (PartitionKeyRange pkr : containerToPartitionKeyRanges.getT2().v) { pkrToContainer.add(new ImmutablePair<>(pkr, containerToPartitionKeyRanges.getT1())); } return pkrToContainer; }) .flatMap(partitionKeyRangeToContainer -> { RxDocumentServiceRequest dummyRequest = RxDocumentServiceRequest.createFromName( mockDiagnosticsClientContext(), OperationType.Read, partitionKeyRangeToContainer.getRight().getLink() + "/docId", ResourceType.Document); dummyRequest.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(partitionKeyRangeToContainer.getLeft().getId())); return globalAddressResolver.resolveAsync(dummyRequest, false); }) .delayElements(Duration.ofSeconds(3)) .doOnNext(addressInformations -> { for (AddressInformation address : addressInformations) { endpoints.add(address.getPhysicalUri().getURI().getAuthority()); } }) .blockLast(); globalEndpointManager.markEndpointUnavailableForRead(proactiveConnectionEndpoint); } assertThat(provider.count()).isEqualTo(endpoints.size()); assertThat(collectionInfoByNameMap.size()).isEqualTo(cosmosContainerIdentities.size()); assertThat(routingMap.size()).isEqualTo(cosmosContainerIdentities.size()); for (CosmosAsyncContainer asyncContainer : asyncContainers) { asyncContainer.delete().block(); } } finally { safeClose(clientWithOpenConnections); } } @Test(groups = {"multi-region"}, dataProvider = "proactiveContainerInitConfigs") public void openConnectionsAndInitCachesWithContainer(List<String> preferredRegions, int numProactiveConnectionRegions, int ignore) { CosmosAsyncClient asyncClient = null; try { asyncClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .endpointDiscoveryEnabled(true) 
.preferredRegions(preferredRegions) .directMode() .buildAsyncClient(); cosmosAsyncDatabase = getSharedCosmosDatabase(asyncClient); List<CosmosContainerIdentity> cosmosContainerIdentities = new ArrayList<>(); String containerId = "id1"; cosmosAsyncDatabase.createContainerIfNotExists(containerId, "/mypk").block(); CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(containerId); cosmosContainerIdentities.add(new CosmosContainerIdentity(cosmosAsyncDatabase.getId(), containerId)); CosmosContainerProactiveInitConfig proactiveContainerInitConfig = new CosmosContainerProactiveInitConfigBuilder(cosmosContainerIdentities) .setProactiveConnectionRegions(numProactiveConnectionRegions) .build(); RntbdTransportClient rntbdTransportClient = (RntbdTransportClient) ReflectionUtils.getTransportClient(asyncClient); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(asyncClient); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; GlobalAddressResolver globalAddressResolver = ReflectionUtils.getGlobalAddressResolver(rxDocumentClient); GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); RntbdEndpoint.Provider provider = ReflectionUtils.getRntbdEndpointProvider(rntbdTransportClient); ConcurrentHashMap<String, ?> routingMap = getRoutingMap(rxDocumentClient); ConcurrentHashMap<String, ?> collectionInfoByNameMap = getCollectionInfoByNameMap(rxDocumentClient); Set<String> endpoints = ConcurrentHashMap.newKeySet(); cosmosAsyncContainer.openConnectionsAndInitCaches(numProactiveConnectionRegions).block(); UnmodifiableList<URI> readEndpoints = globalEndpointManager.getReadEndpoints(); List<URI> proactiveConnectionEndpoints = readEndpoints.subList( 0, Math.min(readEndpoints.size(),proactiveContainerInitConfig.getNumProactiveConnectionRegions())); Mono<CosmosAsyncContainer> asyncContainerMono = Mono.just(cosmosAsyncContainer); 
Mono<Utils.ValueHolder<List<PartitionKeyRange>>> partitionKeyRangeMono = Mono.just(cosmosAsyncContainer) .flatMap(CosmosAsyncContainer::read) .flatMap(containerResponse -> rxDocumentClient .getPartitionKeyRangeCache() .tryGetOverlappingRangesAsync( null, containerResponse.getProperties().getResourceId(), PartitionKeyInternalHelper.FullRange, false, null)); for (URI proactiveConnectionEndpoint : proactiveConnectionEndpoints) { Mono.zip(asyncContainerMono, partitionKeyRangeMono) .flatMapIterable(containerToPartitionKeyRanges -> { List<ImmutablePair<PartitionKeyRange, CosmosAsyncContainer>> pkrToContainer = new ArrayList<>(); for (PartitionKeyRange pkr : containerToPartitionKeyRanges.getT2().v) { pkrToContainer.add(new ImmutablePair<>(pkr, containerToPartitionKeyRanges.getT1())); } return pkrToContainer; }) .flatMap(partitionKeyRangeToContainer -> { RxDocumentServiceRequest dummyRequest = RxDocumentServiceRequest.createFromName( mockDiagnosticsClientContext(), OperationType.Read, partitionKeyRangeToContainer.getRight().getLink() + "/docId", ResourceType.Document); dummyRequest.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(partitionKeyRangeToContainer.getLeft().getId())); return globalAddressResolver.resolveAsync(dummyRequest, false); }) .delayElements(Duration.ofSeconds(3)) .doOnNext(addressInformations -> { for (AddressInformation address : addressInformations) { endpoints.add(address.getPhysicalUri().getURI().getAuthority()); } }) .blockLast(); globalEndpointManager.markEndpointUnavailableForRead(proactiveConnectionEndpoint); } assertThat(provider.count()).isEqualTo(endpoints.size()); assertThat(collectionInfoByNameMap.size()).isEqualTo(cosmosContainerIdentities.size()); assertThat(routingMap.size()).isEqualTo(cosmosContainerIdentities.size()); cosmosAsyncContainer.delete().block(); } finally { safeClose(asyncClient); } } @DataProvider(name = "proactiveContainerInitConfigs") private Object[][] proactiveContainerInitConfigs() { 
Iterator<DatabaseAccountLocation> locationIterator = this.databaseAccount.getReadableLocations().iterator(); List<String> preferredLocations = new ArrayList<>(); while (locationIterator.hasNext()) { DatabaseAccountLocation accountLocation = locationIterator.next(); preferredLocations.add(accountLocation.getName()); preferredLocations.add("EastUS"); } return new Object[][] { new Object[]{preferredLocations, 1, 3}, new Object[]{preferredLocations, 2, 1} }; } @DataProvider(name = "invalidProactiveContainerInitConfigs") private Object[][] invalidProactiveContainerInitConfigs() { Iterator<DatabaseAccountLocation> locationIterator = this.databaseAccount.getReadableLocations().iterator(); List<String> preferredLocations = new ArrayList<>(); while (locationIterator.hasNext()) { DatabaseAccountLocation accountLocation = locationIterator.next(); preferredLocations.add(accountLocation.getName()); } return new Object[][] { new Object[]{preferredLocations, 2, 1}, new Object[]{ Collections.unmodifiableList( Arrays.asList("R1", "R2", "R3", "R4", "R5", "R6")), 6, 1 } }; } private ConcurrentHashMap<String, ?> getCollectionInfoByNameMap(RxDocumentClientImpl rxDocumentClient) { RxClientCollectionCache collectionCache = ReflectionUtils.getClientCollectionCache(rxDocumentClient); AsyncCache<String, DocumentCollection> collectionInfoByNameCache = ReflectionUtils.getCollectionInfoByNameCache(collectionCache); return ReflectionUtils.getValueMap(collectionInfoByNameCache); } private ConcurrentHashMap<String, ?> getRoutingMap(RxDocumentClientImpl rxDocumentClient) { RxPartitionKeyRangeCache partitionKeyRangeCache = ReflectionUtils.getPartitionKeyRangeCache(rxDocumentClient); AsyncCacheNonBlocking<String, CollectionRoutingMap> routingMapAsyncCache = ReflectionUtils.getRoutingMapAsyncCacheNonBlocking(partitionKeyRangeCache); return ReflectionUtils.getValueMapNonBlockingCache(routingMapAsyncCache); } }
exception message can change anytime - and is usually not something we validate - changing exception messages wouldn't be considered breaking while changing the exception type definitely would. So, this is fine IMO.
public void openConnectionsAndInitCachesWithInvalidCosmosClientConfig(List<String> preferredRegions, int numProactiveConnectionRegions, int numContainers) { CosmosAsyncClient clientWithOpenConnections = null; List<CosmosAsyncContainer> asyncContainers = new ArrayList<>(); List<CosmosContainerIdentity> cosmosContainerIdentities = new ArrayList<>(); for (int i = 1; i <= numContainers; i++) { String containerId = String.format("id%d", i); cosmosAsyncDatabase.createContainerIfNotExists(containerId, "/mypk").block(); asyncContainers.add(cosmosAsyncDatabase.getContainer(containerId)); cosmosContainerIdentities.add(new CosmosContainerIdentity(cosmosAsyncDatabase.getId(), containerId)); } if (numProactiveConnectionRegions > 5) { try { CosmosContainerProactiveInitConfig proactiveContainerInitConfig = new CosmosContainerProactiveInitConfigBuilder(cosmosContainerIdentities) .setProactiveConnectionRegions(numProactiveConnectionRegions) .build(); fail("Should have thrown exception"); } catch (IllegalArgumentException illegalArgEx) {} } else { CosmosContainerProactiveInitConfig proactiveContainerInitConfig = new CosmosContainerProactiveInitConfigBuilder(cosmosContainerIdentities) .setProactiveConnectionRegions(numProactiveConnectionRegions) .build(); try { clientWithOpenConnections = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .endpointDiscoveryEnabled(true) .preferredRegions(preferredRegions) .openConnectionsAndInitCaches(proactiveContainerInitConfig) .directMode() .buildAsyncClient(); fail("Should have thrown exception"); } catch (IllegalArgumentException illegalArgEx) {} } }
} catch (IllegalArgumentException illegalArgEx) {}
public void openConnectionsAndInitCachesWithInvalidCosmosClientConfig(List<String> preferredRegions, int numProactiveConnectionRegions, int numContainers) { CosmosAsyncClient clientWithOpenConnections = null; List<CosmosAsyncContainer> asyncContainers = new ArrayList<>(); List<CosmosContainerIdentity> cosmosContainerIdentities = new ArrayList<>(); for (int i = 1; i <= numContainers; i++) { String containerId = String.format("id%d", i); cosmosAsyncDatabase.createContainerIfNotExists(containerId, "/mypk").block(); asyncContainers.add(cosmosAsyncDatabase.getContainer(containerId)); cosmosContainerIdentities.add(new CosmosContainerIdentity(cosmosAsyncDatabase.getId(), containerId)); } if (numProactiveConnectionRegions > 5) { try { CosmosContainerProactiveInitConfig proactiveContainerInitConfig = new CosmosContainerProactiveInitConfigBuilder(cosmosContainerIdentities) .setProactiveConnectionRegions(numProactiveConnectionRegions) .build(); fail("Should have thrown exception"); } catch (IllegalArgumentException illegalArgEx) {} } else { CosmosContainerProactiveInitConfig proactiveContainerInitConfig = new CosmosContainerProactiveInitConfigBuilder(cosmosContainerIdentities) .setProactiveConnectionRegions(numProactiveConnectionRegions) .build(); try { clientWithOpenConnections = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .endpointDiscoveryEnabled(true) .preferredRegions(preferredRegions) .openConnectionsAndInitCaches(proactiveContainerInitConfig) .directMode() .buildAsyncClient(); fail("Should have thrown exception"); } catch (IllegalArgumentException illegalArgEx) {} } }
class ProactiveConnectionManagementTest extends TestSuiteBase { private CosmosClientBuilder clientBuilder; private DatabaseAccount databaseAccount; private CosmosAsyncDatabase cosmosAsyncDatabase; @BeforeClass(groups = {"multi-region"}) public void beforeClass() { clientBuilder = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode(); CosmosAsyncClient dummyClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode().buildAsyncClient(); this.cosmosAsyncDatabase = getSharedCosmosDatabase(dummyClient); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(dummyClient); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); this.databaseAccount = globalEndpointManager.getLatestDatabaseAccount(); safeClose(dummyClient); } @Test(groups = {"multi-region"}, dataProvider = "invalidProactiveContainerInitConfigs") @Test(groups = {"multi-region"}, dataProvider = "proactiveContainerInitConfigs") public void openConnectionsAndInitCachesWithCosmosClient(List<String> preferredRegions, int numProactiveConnectionRegions, int numContainers) { CosmosAsyncClient clientWithOpenConnections = null; try { List<CosmosAsyncContainer> asyncContainers = new ArrayList<>(); List<CosmosContainerIdentity> cosmosContainerIdentities = new ArrayList<>(); for (int i = 1; i <= numContainers; i++) { String containerId = String.format("id%d", i); cosmosAsyncDatabase.createContainerIfNotExists(containerId, "/mypk").block(); asyncContainers.add(cosmosAsyncDatabase.getContainer(containerId)); cosmosContainerIdentities.add(new CosmosContainerIdentity(cosmosAsyncDatabase.getId(), containerId)); } CosmosContainerProactiveInitConfig proactiveContainerInitConfig = 
new CosmosContainerProactiveInitConfigBuilder(cosmosContainerIdentities) .setProactiveConnectionRegions(numProactiveConnectionRegions) .build(); clientWithOpenConnections = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .endpointDiscoveryEnabled(true) .preferredRegions(preferredRegions) .openConnectionsAndInitCaches(proactiveContainerInitConfig) .directMode() .buildAsyncClient(); RntbdTransportClient rntbdTransportClient = (RntbdTransportClient) ReflectionUtils.getTransportClient(clientWithOpenConnections); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(clientWithOpenConnections); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; GlobalAddressResolver globalAddressResolver = ReflectionUtils.getGlobalAddressResolver(rxDocumentClient); GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); RntbdEndpoint.Provider provider = ReflectionUtils.getRntbdEndpointProvider(rntbdTransportClient); ConcurrentHashMap<String, ?> routingMap = getRoutingMap(rxDocumentClient); ConcurrentHashMap<String, ?> collectionInfoByNameMap = getCollectionInfoByNameMap(rxDocumentClient); Set<String> endpoints = ConcurrentHashMap.newKeySet(); UnmodifiableList<URI> readEndpoints = globalEndpointManager.getReadEndpoints(); List<URI> proactiveConnectionEndpoints = readEndpoints.subList( 0, Math.min(readEndpoints.size(), proactiveContainerInitConfig.getNumProactiveConnectionRegions())); Flux<CosmosAsyncContainer> asyncContainerFlux = Flux.fromIterable(asyncContainers); Flux<Utils.ValueHolder<List<PartitionKeyRange>>> partitionKeyRangeFlux = Flux.fromIterable(asyncContainers) .flatMap(CosmosAsyncContainer::read) .flatMap(containerResponse -> rxDocumentClient .getPartitionKeyRangeCache() .tryGetOverlappingRangesAsync( null, containerResponse.getProperties().getResourceId(), PartitionKeyInternalHelper.FullRange, false, null)); for (URI 
proactiveConnectionEndpoint : proactiveConnectionEndpoints) { Flux.zip(asyncContainerFlux, partitionKeyRangeFlux) .flatMapIterable(containerToPartitionKeyRanges -> { List<ImmutablePair<PartitionKeyRange, CosmosAsyncContainer>> pkrToContainer = new ArrayList<>(); for (PartitionKeyRange pkr : containerToPartitionKeyRanges.getT2().v) { pkrToContainer.add(new ImmutablePair<>(pkr, containerToPartitionKeyRanges.getT1())); } return pkrToContainer; }) .flatMap(partitionKeyRangeToContainer -> { RxDocumentServiceRequest dummyRequest = RxDocumentServiceRequest.createFromName( mockDiagnosticsClientContext(), OperationType.Read, partitionKeyRangeToContainer.getRight().getLink() + "/docId", ResourceType.Document); dummyRequest.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(partitionKeyRangeToContainer.getLeft().getId())); return globalAddressResolver.resolveAsync(dummyRequest, false); }) .delayElements(Duration.ofSeconds(3)) .doOnNext(addressInformations -> { for (AddressInformation address : addressInformations) { endpoints.add(address.getPhysicalUri().getURI().getAuthority()); } }) .blockLast(); globalEndpointManager.markEndpointUnavailableForRead(proactiveConnectionEndpoint); } assertThat(provider.count()).isEqualTo(endpoints.size()); assertThat(collectionInfoByNameMap.size()).isEqualTo(cosmosContainerIdentities.size()); assertThat(routingMap.size()).isEqualTo(cosmosContainerIdentities.size()); for (CosmosAsyncContainer asyncContainer : asyncContainers) { asyncContainer.delete().block(); } } finally { safeClose(clientWithOpenConnections); } } @Test(groups = {"multi-region"}, dataProvider = "proactiveContainerInitConfigs") public void openConnectionsAndInitCachesWithContainer(List<String> preferredRegions, int numProactiveConnectionRegions, int ignore) { CosmosAsyncClient asyncClient = null; try { asyncClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .endpointDiscoveryEnabled(true) 
.preferredRegions(preferredRegions) .directMode() .buildAsyncClient(); cosmosAsyncDatabase = getSharedCosmosDatabase(asyncClient); List<CosmosContainerIdentity> cosmosContainerIdentities = new ArrayList<>(); String containerId = "id1"; cosmosAsyncDatabase.createContainerIfNotExists(containerId, "/mypk").block(); CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(containerId); cosmosContainerIdentities.add(new CosmosContainerIdentity(cosmosAsyncDatabase.getId(), containerId)); CosmosContainerProactiveInitConfig proactiveContainerInitConfig = new CosmosContainerProactiveInitConfigBuilder(cosmosContainerIdentities) .setProactiveConnectionRegions(numProactiveConnectionRegions) .build(); RntbdTransportClient rntbdTransportClient = (RntbdTransportClient) ReflectionUtils.getTransportClient(asyncClient); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(asyncClient); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; GlobalAddressResolver globalAddressResolver = ReflectionUtils.getGlobalAddressResolver(rxDocumentClient); GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); RntbdEndpoint.Provider provider = ReflectionUtils.getRntbdEndpointProvider(rntbdTransportClient); ConcurrentHashMap<String, ?> routingMap = getRoutingMap(rxDocumentClient); ConcurrentHashMap<String, ?> collectionInfoByNameMap = getCollectionInfoByNameMap(rxDocumentClient); Set<String> endpoints = ConcurrentHashMap.newKeySet(); cosmosAsyncContainer.openConnectionsAndInitCaches(numProactiveConnectionRegions).block(); UnmodifiableList<URI> readEndpoints = globalEndpointManager.getReadEndpoints(); List<URI> proactiveConnectionEndpoints = readEndpoints.subList( 0, Math.min(readEndpoints.size(),proactiveContainerInitConfig.getNumProactiveConnectionRegions())); Mono<CosmosAsyncContainer> asyncContainerMono = Mono.just(cosmosAsyncContainer); 
Mono<Utils.ValueHolder<List<PartitionKeyRange>>> partitionKeyRangeMono = Mono.just(cosmosAsyncContainer) .flatMap(CosmosAsyncContainer::read) .flatMap(containerResponse -> rxDocumentClient .getPartitionKeyRangeCache() .tryGetOverlappingRangesAsync( null, containerResponse.getProperties().getResourceId(), PartitionKeyInternalHelper.FullRange, false, null)); for (URI proactiveConnectionEndpoint : proactiveConnectionEndpoints) { Mono.zip(asyncContainerMono, partitionKeyRangeMono) .flatMapIterable(containerToPartitionKeyRanges -> { List<ImmutablePair<PartitionKeyRange, CosmosAsyncContainer>> pkrToContainer = new ArrayList<>(); for (PartitionKeyRange pkr : containerToPartitionKeyRanges.getT2().v) { pkrToContainer.add(new ImmutablePair<>(pkr, containerToPartitionKeyRanges.getT1())); } return pkrToContainer; }) .flatMap(partitionKeyRangeToContainer -> { RxDocumentServiceRequest dummyRequest = RxDocumentServiceRequest.createFromName( mockDiagnosticsClientContext(), OperationType.Read, partitionKeyRangeToContainer.getRight().getLink() + "/docId", ResourceType.Document); dummyRequest.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(partitionKeyRangeToContainer.getLeft().getId())); return globalAddressResolver.resolveAsync(dummyRequest, false); }) .delayElements(Duration.ofSeconds(3)) .doOnNext(addressInformations -> { for (AddressInformation address : addressInformations) { endpoints.add(address.getPhysicalUri().getURI().getAuthority()); } }) .blockLast(); globalEndpointManager.markEndpointUnavailableForRead(proactiveConnectionEndpoint); } assertThat(provider.count()).isEqualTo(endpoints.size()); assertThat(collectionInfoByNameMap.size()).isEqualTo(cosmosContainerIdentities.size()); assertThat(routingMap.size()).isEqualTo(cosmosContainerIdentities.size()); cosmosAsyncContainer.delete().block(); } finally { safeClose(asyncClient); } } @DataProvider(name = "proactiveContainerInitConfigs") private Object[][] proactiveContainerInitConfigs() { 
Iterator<DatabaseAccountLocation> locationIterator = this.databaseAccount.getReadableLocations().iterator(); List<String> preferredLocations = new ArrayList<>(); while (locationIterator.hasNext()) { DatabaseAccountLocation accountLocation = locationIterator.next(); preferredLocations.add(accountLocation.getName()); preferredLocations.add("EastUS"); } return new Object[][] { new Object[]{preferredLocations, 1, 3}, new Object[]{preferredLocations, 2, 1} }; } @DataProvider(name = "invalidProactiveContainerInitConfigs") private Object[][] invalidProactiveContainerInitConfigs() { Iterator<DatabaseAccountLocation> locationIterator = this.databaseAccount.getReadableLocations().iterator(); List<String> preferredLocations = new ArrayList<>(); while (locationIterator.hasNext()) { DatabaseAccountLocation accountLocation = locationIterator.next(); preferredLocations.add(accountLocation.getName()); } return new Object[][] { new Object[]{preferredLocations, 2, 1}, new Object[]{ Collections.unmodifiableList( Arrays.asList("R1", "R2", "R3", "R4", "R5", "R6")), 6, 1 } }; } private ConcurrentHashMap<String, ?> getCollectionInfoByNameMap(RxDocumentClientImpl rxDocumentClient) { RxClientCollectionCache collectionCache = ReflectionUtils.getClientCollectionCache(rxDocumentClient); AsyncCache<String, DocumentCollection> collectionInfoByNameCache = ReflectionUtils.getCollectionInfoByNameCache(collectionCache); return ReflectionUtils.getValueMap(collectionInfoByNameCache); } private ConcurrentHashMap<String, ?> getRoutingMap(RxDocumentClientImpl rxDocumentClient) { RxPartitionKeyRangeCache partitionKeyRangeCache = ReflectionUtils.getPartitionKeyRangeCache(rxDocumentClient); AsyncCacheNonBlocking<String, CollectionRoutingMap> routingMapAsyncCache = ReflectionUtils.getRoutingMapAsyncCacheNonBlocking(partitionKeyRangeCache); return ReflectionUtils.getValueMapNonBlockingCache(routingMapAsyncCache); } }
class ProactiveConnectionManagementTest extends TestSuiteBase { private CosmosClientBuilder clientBuilder; private DatabaseAccount databaseAccount; private CosmosAsyncDatabase cosmosAsyncDatabase; @BeforeClass(groups = {"multi-region"}) public void beforeClass() { clientBuilder = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode(); CosmosAsyncClient dummyClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode().buildAsyncClient(); this.cosmosAsyncDatabase = getSharedCosmosDatabase(dummyClient); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(dummyClient); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); this.databaseAccount = globalEndpointManager.getLatestDatabaseAccount(); safeClose(dummyClient); } @Test(groups = {"multi-region"}, dataProvider = "invalidProactiveContainerInitConfigs") @Test(groups = {"multi-region"}, dataProvider = "proactiveContainerInitConfigs") public void openConnectionsAndInitCachesWithCosmosClient(List<String> preferredRegions, int numProactiveConnectionRegions, int numContainers) { CosmosAsyncClient clientWithOpenConnections = null; try { List<CosmosAsyncContainer> asyncContainers = new ArrayList<>(); List<CosmosContainerIdentity> cosmosContainerIdentities = new ArrayList<>(); for (int i = 1; i <= numContainers; i++) { String containerId = String.format("id%d", i); cosmosAsyncDatabase.createContainerIfNotExists(containerId, "/mypk").block(); asyncContainers.add(cosmosAsyncDatabase.getContainer(containerId)); cosmosContainerIdentities.add(new CosmosContainerIdentity(cosmosAsyncDatabase.getId(), containerId)); } CosmosContainerProactiveInitConfig proactiveContainerInitConfig = 
new CosmosContainerProactiveInitConfigBuilder(cosmosContainerIdentities) .setProactiveConnectionRegions(numProactiveConnectionRegions) .build(); clientWithOpenConnections = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .endpointDiscoveryEnabled(true) .preferredRegions(preferredRegions) .openConnectionsAndInitCaches(proactiveContainerInitConfig) .directMode() .buildAsyncClient(); RntbdTransportClient rntbdTransportClient = (RntbdTransportClient) ReflectionUtils.getTransportClient(clientWithOpenConnections); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(clientWithOpenConnections); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; GlobalAddressResolver globalAddressResolver = ReflectionUtils.getGlobalAddressResolver(rxDocumentClient); GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); RntbdEndpoint.Provider provider = ReflectionUtils.getRntbdEndpointProvider(rntbdTransportClient); ConcurrentHashMap<String, ?> routingMap = getRoutingMap(rxDocumentClient); ConcurrentHashMap<String, ?> collectionInfoByNameMap = getCollectionInfoByNameMap(rxDocumentClient); Set<String> endpoints = ConcurrentHashMap.newKeySet(); UnmodifiableList<URI> readEndpoints = globalEndpointManager.getReadEndpoints(); List<URI> proactiveConnectionEndpoints = readEndpoints.subList( 0, Math.min(readEndpoints.size(), proactiveContainerInitConfig.getNumProactiveConnectionRegions())); Flux<CosmosAsyncContainer> asyncContainerFlux = Flux.fromIterable(asyncContainers); Flux<Utils.ValueHolder<List<PartitionKeyRange>>> partitionKeyRangeFlux = Flux.fromIterable(asyncContainers) .flatMap(CosmosAsyncContainer::read) .flatMap(containerResponse -> rxDocumentClient .getPartitionKeyRangeCache() .tryGetOverlappingRangesAsync( null, containerResponse.getProperties().getResourceId(), PartitionKeyInternalHelper.FullRange, false, null)); for (URI 
proactiveConnectionEndpoint : proactiveConnectionEndpoints) { Flux.zip(asyncContainerFlux, partitionKeyRangeFlux) .flatMapIterable(containerToPartitionKeyRanges -> { List<ImmutablePair<PartitionKeyRange, CosmosAsyncContainer>> pkrToContainer = new ArrayList<>(); for (PartitionKeyRange pkr : containerToPartitionKeyRanges.getT2().v) { pkrToContainer.add(new ImmutablePair<>(pkr, containerToPartitionKeyRanges.getT1())); } return pkrToContainer; }) .flatMap(partitionKeyRangeToContainer -> { RxDocumentServiceRequest dummyRequest = RxDocumentServiceRequest.createFromName( mockDiagnosticsClientContext(), OperationType.Read, partitionKeyRangeToContainer.getRight().getLink() + "/docId", ResourceType.Document); dummyRequest.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(partitionKeyRangeToContainer.getLeft().getId())); return globalAddressResolver.resolveAsync(dummyRequest, false); }) .delayElements(Duration.ofSeconds(3)) .doOnNext(addressInformations -> { for (AddressInformation address : addressInformations) { endpoints.add(address.getPhysicalUri().getURI().getAuthority()); } }) .blockLast(); globalEndpointManager.markEndpointUnavailableForRead(proactiveConnectionEndpoint); } assertThat(provider.count()).isEqualTo(endpoints.size()); assertThat(collectionInfoByNameMap.size()).isEqualTo(cosmosContainerIdentities.size()); assertThat(routingMap.size()).isEqualTo(cosmosContainerIdentities.size()); for (CosmosAsyncContainer asyncContainer : asyncContainers) { asyncContainer.delete().block(); } } finally { safeClose(clientWithOpenConnections); } } @Test(groups = {"multi-region"}, dataProvider = "proactiveContainerInitConfigs") public void openConnectionsAndInitCachesWithContainer(List<String> preferredRegions, int numProactiveConnectionRegions, int ignore) { CosmosAsyncClient asyncClient = null; try { asyncClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .endpointDiscoveryEnabled(true) 
.preferredRegions(preferredRegions) .directMode() .buildAsyncClient(); cosmosAsyncDatabase = getSharedCosmosDatabase(asyncClient); List<CosmosContainerIdentity> cosmosContainerIdentities = new ArrayList<>(); String containerId = "id1"; cosmosAsyncDatabase.createContainerIfNotExists(containerId, "/mypk").block(); CosmosAsyncContainer cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(containerId); cosmosContainerIdentities.add(new CosmosContainerIdentity(cosmosAsyncDatabase.getId(), containerId)); CosmosContainerProactiveInitConfig proactiveContainerInitConfig = new CosmosContainerProactiveInitConfigBuilder(cosmosContainerIdentities) .setProactiveConnectionRegions(numProactiveConnectionRegions) .build(); RntbdTransportClient rntbdTransportClient = (RntbdTransportClient) ReflectionUtils.getTransportClient(asyncClient); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(asyncClient); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; GlobalAddressResolver globalAddressResolver = ReflectionUtils.getGlobalAddressResolver(rxDocumentClient); GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); RntbdEndpoint.Provider provider = ReflectionUtils.getRntbdEndpointProvider(rntbdTransportClient); ConcurrentHashMap<String, ?> routingMap = getRoutingMap(rxDocumentClient); ConcurrentHashMap<String, ?> collectionInfoByNameMap = getCollectionInfoByNameMap(rxDocumentClient); Set<String> endpoints = ConcurrentHashMap.newKeySet(); cosmosAsyncContainer.openConnectionsAndInitCaches(numProactiveConnectionRegions).block(); UnmodifiableList<URI> readEndpoints = globalEndpointManager.getReadEndpoints(); List<URI> proactiveConnectionEndpoints = readEndpoints.subList( 0, Math.min(readEndpoints.size(),proactiveContainerInitConfig.getNumProactiveConnectionRegions())); Mono<CosmosAsyncContainer> asyncContainerMono = Mono.just(cosmosAsyncContainer); 
Mono<Utils.ValueHolder<List<PartitionKeyRange>>> partitionKeyRangeMono = Mono.just(cosmosAsyncContainer) .flatMap(CosmosAsyncContainer::read) .flatMap(containerResponse -> rxDocumentClient .getPartitionKeyRangeCache() .tryGetOverlappingRangesAsync( null, containerResponse.getProperties().getResourceId(), PartitionKeyInternalHelper.FullRange, false, null)); for (URI proactiveConnectionEndpoint : proactiveConnectionEndpoints) { Mono.zip(asyncContainerMono, partitionKeyRangeMono) .flatMapIterable(containerToPartitionKeyRanges -> { List<ImmutablePair<PartitionKeyRange, CosmosAsyncContainer>> pkrToContainer = new ArrayList<>(); for (PartitionKeyRange pkr : containerToPartitionKeyRanges.getT2().v) { pkrToContainer.add(new ImmutablePair<>(pkr, containerToPartitionKeyRanges.getT1())); } return pkrToContainer; }) .flatMap(partitionKeyRangeToContainer -> { RxDocumentServiceRequest dummyRequest = RxDocumentServiceRequest.createFromName( mockDiagnosticsClientContext(), OperationType.Read, partitionKeyRangeToContainer.getRight().getLink() + "/docId", ResourceType.Document); dummyRequest.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(partitionKeyRangeToContainer.getLeft().getId())); return globalAddressResolver.resolveAsync(dummyRequest, false); }) .delayElements(Duration.ofSeconds(3)) .doOnNext(addressInformations -> { for (AddressInformation address : addressInformations) { endpoints.add(address.getPhysicalUri().getURI().getAuthority()); } }) .blockLast(); globalEndpointManager.markEndpointUnavailableForRead(proactiveConnectionEndpoint); } assertThat(provider.count()).isEqualTo(endpoints.size()); assertThat(collectionInfoByNameMap.size()).isEqualTo(cosmosContainerIdentities.size()); assertThat(routingMap.size()).isEqualTo(cosmosContainerIdentities.size()); cosmosAsyncContainer.delete().block(); } finally { safeClose(asyncClient); } } @DataProvider(name = "proactiveContainerInitConfigs") private Object[][] proactiveContainerInitConfigs() { 
Iterator<DatabaseAccountLocation> locationIterator = this.databaseAccount.getReadableLocations().iterator(); List<String> preferredLocations = new ArrayList<>(); while (locationIterator.hasNext()) { DatabaseAccountLocation accountLocation = locationIterator.next(); preferredLocations.add(accountLocation.getName()); preferredLocations.add("EastUS"); } return new Object[][] { new Object[]{preferredLocations, 1, 3}, new Object[]{preferredLocations, 2, 1} }; } @DataProvider(name = "invalidProactiveContainerInitConfigs") private Object[][] invalidProactiveContainerInitConfigs() { Iterator<DatabaseAccountLocation> locationIterator = this.databaseAccount.getReadableLocations().iterator(); List<String> preferredLocations = new ArrayList<>(); while (locationIterator.hasNext()) { DatabaseAccountLocation accountLocation = locationIterator.next(); preferredLocations.add(accountLocation.getName()); } return new Object[][] { new Object[]{preferredLocations, 2, 1}, new Object[]{ Collections.unmodifiableList( Arrays.asList("R1", "R2", "R3", "R4", "R5", "R6")), 6, 1 } }; } private ConcurrentHashMap<String, ?> getCollectionInfoByNameMap(RxDocumentClientImpl rxDocumentClient) { RxClientCollectionCache collectionCache = ReflectionUtils.getClientCollectionCache(rxDocumentClient); AsyncCache<String, DocumentCollection> collectionInfoByNameCache = ReflectionUtils.getCollectionInfoByNameCache(collectionCache); return ReflectionUtils.getValueMap(collectionInfoByNameCache); } private ConcurrentHashMap<String, ?> getRoutingMap(RxDocumentClientImpl rxDocumentClient) { RxPartitionKeyRangeCache partitionKeyRangeCache = ReflectionUtils.getPartitionKeyRangeCache(rxDocumentClient); AsyncCacheNonBlocking<String, CollectionRoutingMap> routingMapAsyncCache = ReflectionUtils.getRoutingMapAsyncCacheNonBlocking(partitionKeyRangeCache); return ReflectionUtils.getValueMapNonBlockingCache(routingMapAsyncCache); } }
we probably can just call `openConnectionsAndInitCaches(1)` overall
public Mono<Void> openConnectionsAndInitCaches() { if (isInitialized.compareAndSet(false, true)) { CosmosContainerIdentity cosmosContainerIdentity = new CosmosContainerIdentity(this.database.getId(), this.id); CosmosContainerProactiveInitConfig proactiveContainerInitConfig = new CosmosContainerProactiveInitConfigBuilder(Arrays.asList(cosmosContainerIdentity)) .setProactiveConnectionRegions(1) .build(); return withContext(context -> openConnectionsAndInitCachesInternal(proactiveContainerInitConfig) .flatMap(openResult -> { logger.info("OpenConnectionsAndInitCaches: {}", openResult); return Mono.empty(); })); } else { logger.warn("OpenConnectionsAndInitCaches is already called once on Container {}, no operation will take place in this call", this.getId()); return Mono.empty(); } }
if (isInitialized.compareAndSet(false, true)) {
public Mono<Void> openConnectionsAndInitCaches() { if (isInitialized.compareAndSet(false, true)) { CosmosContainerIdentity cosmosContainerIdentity = new CosmosContainerIdentity(this.database.getId(), this.id); CosmosContainerProactiveInitConfig proactiveContainerInitConfig = new CosmosContainerProactiveInitConfigBuilder(Arrays.asList(cosmosContainerIdentity)) .setProactiveConnectionRegions(1) .build(); return withContext(context -> openConnectionsAndInitCachesInternal(proactiveContainerInitConfig) .flatMap(openResult -> { logger.info("OpenConnectionsAndInitCaches: {}", openResult); return Mono.empty(); })); } else { logger.warn("OpenConnectionsAndInitCaches is already called once on Container {}, no operation will take place in this call", this.getId()); return Mono.empty(); } }
class type. * @return a {@link CosmosPagedFlux}
class type. * @return a {@link CosmosPagedFlux}
I think I got why we can not do that - because we want to avoid to make it breaking change. As currently, customer can call `openConnectionsAndInitCaches() ` without preferredRegions being configured. So it need to have its own validation. Can we add a comments here to mention the above difference?
public Mono<Void> openConnectionsAndInitCaches() { if (isInitialized.compareAndSet(false, true)) { CosmosContainerIdentity cosmosContainerIdentity = new CosmosContainerIdentity(this.database.getId(), this.id); CosmosContainerProactiveInitConfig proactiveContainerInitConfig = new CosmosContainerProactiveInitConfigBuilder(Arrays.asList(cosmosContainerIdentity)) .setProactiveConnectionRegions(1) .build(); return withContext(context -> openConnectionsAndInitCachesInternal(proactiveContainerInitConfig) .flatMap(openResult -> { logger.info("OpenConnectionsAndInitCaches: {}", openResult); return Mono.empty(); })); } else { logger.warn("OpenConnectionsAndInitCaches is already called once on Container {}, no operation will take place in this call", this.getId()); return Mono.empty(); } }
if (isInitialized.compareAndSet(false, true)) {
public Mono<Void> openConnectionsAndInitCaches() { if (isInitialized.compareAndSet(false, true)) { CosmosContainerIdentity cosmosContainerIdentity = new CosmosContainerIdentity(this.database.getId(), this.id); CosmosContainerProactiveInitConfig proactiveContainerInitConfig = new CosmosContainerProactiveInitConfigBuilder(Arrays.asList(cosmosContainerIdentity)) .setProactiveConnectionRegions(1) .build(); return withContext(context -> openConnectionsAndInitCachesInternal(proactiveContainerInitConfig) .flatMap(openResult -> { logger.info("OpenConnectionsAndInitCaches: {}", openResult); return Mono.empty(); })); } else { logger.warn("OpenConnectionsAndInitCaches is already called once on Container {}, no operation will take place in this call", this.getId()); return Mono.empty(); } }
class type. * @return a {@link CosmosPagedFlux}
class type. * @return a {@link CosmosPagedFlux}
Correct - avoiding the preferred region validation
public Mono<Void> openConnectionsAndInitCaches() { if (isInitialized.compareAndSet(false, true)) { CosmosContainerIdentity cosmosContainerIdentity = new CosmosContainerIdentity(this.database.getId(), this.id); CosmosContainerProactiveInitConfig proactiveContainerInitConfig = new CosmosContainerProactiveInitConfigBuilder(Arrays.asList(cosmosContainerIdentity)) .setProactiveConnectionRegions(1) .build(); return withContext(context -> openConnectionsAndInitCachesInternal(proactiveContainerInitConfig) .flatMap(openResult -> { logger.info("OpenConnectionsAndInitCaches: {}", openResult); return Mono.empty(); })); } else { logger.warn("OpenConnectionsAndInitCaches is already called once on Container {}, no operation will take place in this call", this.getId()); return Mono.empty(); } }
if (isInitialized.compareAndSet(false, true)) {
public Mono<Void> openConnectionsAndInitCaches() { if (isInitialized.compareAndSet(false, true)) { CosmosContainerIdentity cosmosContainerIdentity = new CosmosContainerIdentity(this.database.getId(), this.id); CosmosContainerProactiveInitConfig proactiveContainerInitConfig = new CosmosContainerProactiveInitConfigBuilder(Arrays.asList(cosmosContainerIdentity)) .setProactiveConnectionRegions(1) .build(); return withContext(context -> openConnectionsAndInitCachesInternal(proactiveContainerInitConfig) .flatMap(openResult -> { logger.info("OpenConnectionsAndInitCaches: {}", openResult); return Mono.empty(); })); } else { logger.warn("OpenConnectionsAndInitCaches is already called once on Container {}, no operation will take place in this call", this.getId()); return Mono.empty(); } }
class type. * @return a {@link CosmosPagedFlux}
class type. * @return a {@link CosmosPagedFlux}
Need to double think about it -> it can simulate timeout happens, but the diagnostics will be off, want to change into the same mode as connection delay
// Completes the pending RntbdRequestRecord that matches an inbound RNTBD response
// (fault-injection-aware variant).
// Flow:
//   1. Look up the record by the response's transportRequestId; silently drop the
//      response if either the id or the record is missing (request may have
//      already expired/been cancelled).
//   2. Stamp DECODE_STARTED / RECEIVED timing stages and record the response length.
//   3. Success path (2xx or 304): convert to a StoreResponse and complete the record.
//   4. Failure path: map the RNTBD status code — with sub-status dispatch for
//      410 GONE (split/merge, partition migration, stale name cache, PKRange gone) —
//      to the corresponding CosmosException subtype, attach LSN / partition key
//      range id / response headers / resource address, and complete exceptionally.
//      Note: REQUEST_TIMEOUT is deliberately wrapped in a GoneException so the
//      retry policy treats it as retriable.
// When a serverErrorInjector is configured, applyServerResponseLatencyRule is
// consulted BEFORE completion on both paths; if a latency rule applies, the
// injector takes over delivery of the result and this method returns early.
// NOTE(review): this ordering (injector check before complete/completeExceptionally)
// is load-bearing — do not reorder.
private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) { final Long transportRequestId = response.getTransportRequestId(); if (transportRequestId == null) { reportIssue(context, "response ignored because its transportRequestId is missing: {}", response); return; } final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId); if (requestRecord == null) { logger.debug("response {} ignored because its requestRecord is missing: {}", transportRequestId, response); return; } requestRecord.stage(RntbdRequestRecord.Stage.DECODE_STARTED, response.getDecodeStartTime()); requestRecord.stage( RntbdRequestRecord.Stage.RECEIVED, response.getDecodeEndTime() != null ? response.getDecodeEndTime() : Instant.now()); requestRecord.responseLength(response.getMessageLength()); final HttpResponseStatus status = response.getStatus(); final UUID activityId = response.getActivityId(); final int statusCode = status.code(); if ((HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code()) || statusCode == HttpResponseStatus.NOT_MODIFIED.code()) { final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null)); if (this.serverErrorInjector != null && this.serverErrorInjector.applyServerResponseLatencyRule(requestRecord, storeResponse)) { return; } requestRecord.complete(storeResponse); } else { final CosmosException cause; final long lsn = response.getHeader(RntbdResponseHeader.LSN); final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId); final CosmosError error = response.hasPayload() ? 
new CosmosError(RntbdObjectMapper.readTree(response)) : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name()); final Map<String, String> responseHeaders = response.getHeaders().asMap( this.rntbdContext().orElseThrow(IllegalStateException::new), activityId ); final String resourceAddress = requestRecord.args().physicalAddressUri() != null ? requestRecord.args().physicalAddressUri().getURI().toString() : null; switch (status.code()) { case StatusCodes.BADREQUEST: cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.CONFLICT: cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.FORBIDDEN: cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.GONE: final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus)); switch (subStatusCode) { case SubStatusCodes.COMPLETING_SPLIT_OR_MERGE: cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.COMPLETING_PARTITION_MIGRATION: cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.NAME_CACHE_IS_STALE: cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.PARTITION_KEY_RANGE_GONE: cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: GoneException goneExceptionFromService = new GoneException(error, lsn, partitionKeyRangeId, responseHeaders); goneExceptionFromService.setIsBasedOn410ResponseFromService(); cause = goneExceptionFromService; break; } break; case StatusCodes.INTERNAL_SERVER_ERROR: cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.LOCKED: cause = new LockedException(error, lsn, 
partitionKeyRangeId, responseHeaders); break; case StatusCodes.METHOD_NOT_ALLOWED: cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.NOTFOUND: cause = new NotFoundException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.PRECONDITION_FAILED: cause = new PreconditionFailedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_ENTITY_TOO_LARGE: cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_TIMEOUT: Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders); cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders, inner); break; case StatusCodes.RETRY_WITH: cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.SERVICE_UNAVAILABLE: cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.TOO_MANY_REQUESTS: cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.UNAUTHORIZED: cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = BridgeInternal.createCosmosException(resourceAddress, status.code(), error, responseHeaders); break; } BridgeInternal.setResourceAddress(cause, resourceAddress); if (this.serverErrorInjector != null && this.serverErrorInjector.applyServerResponseLatencyRule(requestRecord, cause)) { return; } requestRecord.completeExceptionally(cause); } }
if (this.serverErrorInjector != null
// Completes the pending RntbdRequestRecord that matches an inbound RNTBD response
// (variant WITHOUT fault-injection hooks — results are delivered unconditionally).
// Flow:
//   1. Resolve the record via the response's transportRequestId; drop the response
//      if the id or the record is absent (the request may already have expired).
//   2. Record DECODE_STARTED / RECEIVED timing stages and the response length.
//   3. Success path (2xx or 304): build a StoreResponse and complete the record.
//   4. Failure path: translate the RNTBD status code — with sub-status dispatch for
//      410 GONE (split/merge, partition migration, stale name cache, PKRange gone) —
//      into the matching CosmosException subtype carrying LSN, partition key range
//      id, response headers, and the resource address, then complete exceptionally.
//      REQUEST_TIMEOUT is wrapped in a GoneException (with the timeout as inner
//      cause) so upstream retry logic treats it as retriable.
private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) { final Long transportRequestId = response.getTransportRequestId(); if (transportRequestId == null) { reportIssue(context, "response ignored because its transportRequestId is missing: {}", response); return; } final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId); if (requestRecord == null) { logger.debug("response {} ignored because its requestRecord is missing: {}", transportRequestId, response); return; } requestRecord.stage(RntbdRequestRecord.Stage.DECODE_STARTED, response.getDecodeStartTime()); requestRecord.stage( RntbdRequestRecord.Stage.RECEIVED, response.getDecodeEndTime() != null ? response.getDecodeEndTime() : Instant.now()); requestRecord.responseLength(response.getMessageLength()); final HttpResponseStatus status = response.getStatus(); final UUID activityId = response.getActivityId(); final int statusCode = status.code(); if ((HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code()) || statusCode == HttpResponseStatus.NOT_MODIFIED.code()) { final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null)); requestRecord.complete(storeResponse); } else { final CosmosException cause; final long lsn = response.getHeader(RntbdResponseHeader.LSN); final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId); final CosmosError error = response.hasPayload() ? new CosmosError(RntbdObjectMapper.readTree(response)) : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name()); final Map<String, String> responseHeaders = response.getHeaders().asMap( this.rntbdContext().orElseThrow(IllegalStateException::new), activityId ); final String resourceAddress = requestRecord.args().physicalAddressUri() != null ? 
requestRecord.args().physicalAddressUri().getURI().toString() : null; switch (status.code()) { case StatusCodes.BADREQUEST: cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.CONFLICT: cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.FORBIDDEN: cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.GONE: final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus)); switch (subStatusCode) { case SubStatusCodes.COMPLETING_SPLIT_OR_MERGE: cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.COMPLETING_PARTITION_MIGRATION: cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.NAME_CACHE_IS_STALE: cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.PARTITION_KEY_RANGE_GONE: cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: GoneException goneExceptionFromService = new GoneException(error, lsn, partitionKeyRangeId, responseHeaders); goneExceptionFromService.setIsBasedOn410ResponseFromService(); cause = goneExceptionFromService; break; } break; case StatusCodes.INTERNAL_SERVER_ERROR: cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.LOCKED: cause = new LockedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.METHOD_NOT_ALLOWED: cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.NOTFOUND: cause = new NotFoundException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.PRECONDITION_FAILED: cause = new PreconditionFailedException(error, lsn, 
partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_ENTITY_TOO_LARGE: cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_TIMEOUT: Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders); cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders, inner); break; case StatusCodes.RETRY_WITH: cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.SERVICE_UNAVAILABLE: cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.TOO_MANY_REQUESTS: cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.UNAUTHORIZED: cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = BridgeInternal.createCosmosException(resourceAddress, status.code(), error, responseHeaders); break; } BridgeInternal.setResourceAddress(cause, resourceAddress); requestRecord.completeExceptionally(cause); } }
// Netty duplex handler that owns the RNTBD request/response lifecycle on a single
// channel. Responsibilities visible in this class:
//  - Tracks in-flight requests in a ConcurrentHashMap keyed by transportRequestId
//    (addPendingRequestRecord registers an expiration Timeout on a shared
//    single-threaded EventExecutor and removes the entry on completion).
//  - Buffers encoded writes in a CoalescingBufferQueue (created in
//    channelRegistered) until RNTBD context negotiation finishes; on an RntbdContext
//    user event the negotiator handlers are removed and pending writes are flushed.
//  - On inbound reads, dispatches RntbdResponse messages to messageReceived and
//    releases reference-counted messages in a finally block.
//  - On idle-state events, runs the configured ChannelHealthChecker and closes the
//    channel (via exceptionCaught) when unhealthy; SslHandshakeCompletionEvent
//    installs an IdleStateHandler after the SslHandler on success.
//  - On close/deregister/unregister or any caught exception,
//    completeAllPendingRequestsExceptionally fails the context futures, notifies
//    the RntbdConnectionStateListener, and completes every pending record with a
//    GoneException (retriable) built from the failure cause. The
//    closingExceptionally flag makes that teardown run at most once.
//  - Fault-injection user events (RntbdFaultInjectionConnectionResetEvent /
//    ...CloseEvent) simulate a connection reset / close respectively.
// NOTE(review): statement order inside the teardown and write paths is deliberate
// (e.g. the serverErrorInjector check precedes context.write) — documented here
// rather than restyled.
class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler { private static final ClosedChannelException ON_CHANNEL_UNREGISTERED = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered"); private static final ClosedChannelException ON_CLOSE = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close"); private static final ClosedChannelException ON_DEREGISTER = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister"); private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory( "request-expirator", true, Thread.NORM_PRIORITY)); private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class); private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>(); private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>(); private final ChannelHealthChecker healthChecker; private final int pendingRequestLimit; private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests; private final Timestamps timestamps = new Timestamps(); private final RntbdConnectionStateListener rntbdConnectionStateListener; private final long idleConnectionTimerResolutionInNanos; private final RntbdServerErrorInjector serverErrorInjector; private boolean closingExceptionally = false; private CoalescingBufferQueue pendingWrites; public RntbdRequestManager( final ChannelHealthChecker healthChecker, final int pendingRequestLimit, final RntbdConnectionStateListener connectionStateListener, final long idleConnectionTimerResolutionInNanos, final RntbdServerErrorInjector serverErrorInjector) { checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit); checkNotNull(healthChecker, "healthChecker"); this.pendingRequests = new 
ConcurrentHashMap<>(pendingRequestLimit); this.pendingRequestLimit = pendingRequestLimit; this.healthChecker = healthChecker; this.rntbdConnectionStateListener = connectionStateListener; this.idleConnectionTimerResolutionInNanos = idleConnectionTimerResolutionInNanos; this.serverErrorInjector = serverErrorInjector; } /** * Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerAdded(final ChannelHandlerContext context) { this.traceOperation(context, "handlerAdded"); } /** * Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events * anymore. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerRemoved(final ChannelHandlerContext context) { this.traceOperation(context, "handlerRemoved"); } /** * The {@link Channel} of the {@link ChannelHandlerContext} is now active * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelActive(final ChannelHandlerContext context) { this.traceOperation(context, "channelActive"); context.fireChannelActive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was registered and has reached the end of its lifetime * <p> * This method will only be called after the channel is closed. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelInactive(final ChannelHandlerContext context) { this.traceOperation(context, "channelInactive"); context.fireChannelInactive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} has read a message from its peer. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs. 
* @param message The message read. */ @Override public void channelRead(final ChannelHandlerContext context, final Object message) { this.traceOperation(context, "channelRead"); this.timestamps.channelReadCompleted(); try { if (message.getClass() == RntbdResponse.class) { try { this.messageReceived(context, (RntbdResponse) message); } catch (CorruptedFrameException error) { this.exceptionCaught(context, error); } catch (Throwable throwable) { reportIssue(context, "{} ", message, throwable); this.exceptionCaught(context, throwable); } } else { final IllegalStateException error = new IllegalStateException( lenientFormat("expected message of %s, not %s: %s", RntbdResponse.class, message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } } finally { if (message instanceof ReferenceCounted) { boolean released = ((ReferenceCounted) message).release(); reportIssueUnless(released, context, "failed to release message: {}", message); } } } /** * The {@link Channel} of the {@link ChannelHandlerContext} has fully consumed the most-recent message read. * <p> * If {@link ChannelOption * {@link Channel} will be made until {@link ChannelHandlerContext * for outbound messages to be written. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelReadComplete(final ChannelHandlerContext context) { this.traceOperation(context, "channelReadComplete"); context.fireChannelReadComplete(); } /** * Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until we have an {@link RntbdRequest} * <p> * This method then calls {@link ChannelHandlerContext * {@link ChannelInboundHandler} in the {@link ChannelPipeline}. * <p> * Sub-classes may override this method to change behavior. 
* * @param context the {@link ChannelHandlerContext} for which the bind operation is made */ @Override public void channelRegistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelRegistered"); reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites); this.pendingWrites = new CoalescingBufferQueue(context.channel()); context.fireChannelRegistered(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop} * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelUnregistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelUnregistered"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED); } else { logger.debug("{} channelUnregistered exceptionally", context); } context.fireChannelUnregistered(); } /** * Gets called once the writable state of a {@link Channel} changed. 
You can check the state with * {@link Channel * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelWritabilityChanged(final ChannelHandlerContext context) { this.traceOperation(context, "channelWritabilityChanged"); context.fireChannelWritabilityChanged(); } /** * Processes {@link ChannelHandlerContext * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param cause Exception caught */ @Override @SuppressWarnings("deprecation") public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) { this.traceOperation(context, "exceptionCaught", cause); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, cause); if (logger.isDebugEnabled()) { logger.debug("{} closing due to:", context, cause); } context.flush().close(); } } /** * Processes inbound events triggered by channel handlers in the {@link RntbdClientChannelHandler} pipeline * <p> * All but inbound request management events are ignored. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param event An object representing a user event */ @Override public void userEventTriggered(final ChannelHandlerContext context, final Object event) { this.traceOperation(context, "userEventTriggered", event); try { if (event instanceof IdleStateEvent) { if (this.healthChecker instanceof RntbdClientChannelHealthChecker) { ((RntbdClientChannelHealthChecker) this.healthChecker) .isHealthyWithFailureReason(context.channel()).addListener((Future<String> future) -> { final Throwable cause; if (future.isSuccess()) { if (RntbdConstants.RntbdHealthCheckResults.SuccessValue.equals(future.get())) { return; } cause = new UnhealthyChannelException(future.get()); } else { cause = future.cause(); } this.exceptionCaught(context, cause); }); } else { this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> { final Throwable cause; if (future.isSuccess()) { if (future.get()) { return; } cause = new UnhealthyChannelException( MessageFormat.format( "Custom ChannelHealthChecker {0} failed.", this.healthChecker.getClass().getSimpleName())); } else { cause = future.cause(); } this.exceptionCaught(context, cause); }); } return; } if (event instanceof RntbdContext) { this.contextFuture.complete((RntbdContext) event); this.removeContextNegotiatorAndFlushPendingWrites(context); this.timestamps.channelReadCompleted(); return; } if (event instanceof RntbdContextException) { this.contextFuture.completeExceptionally((RntbdContextException) event); this.exceptionCaught(context, (RntbdContextException)event); return; } if (event instanceof SslHandshakeCompletionEvent) { SslHandshakeCompletionEvent sslHandshakeCompletionEvent = (SslHandshakeCompletionEvent) event; if (sslHandshakeCompletionEvent.isSuccess()) { if (logger.isDebugEnabled()) { logger.debug("SslHandshake completed, adding idleStateHandler"); } context.pipeline().addAfter( SslHandler.class.toString(), 
IdleStateHandler.class.toString(), new IdleStateHandler( this.idleConnectionTimerResolutionInNanos, this.idleConnectionTimerResolutionInNanos, 0, TimeUnit.NANOSECONDS)); } else { if (logger.isDebugEnabled()) { logger.debug("SslHandshake failed", sslHandshakeCompletionEvent.cause()); } this.exceptionCaught(context, sslHandshakeCompletionEvent.cause()); return; } } if (event instanceof RntbdFaultInjectionConnectionResetEvent) { this.exceptionCaught(context, new IOException("Fault Injection Connection Reset")); return; } if (event instanceof RntbdFaultInjectionConnectionCloseEvent) { context.close(); return; } context.fireUserEventTriggered(event); } catch (Throwable error) { reportIssue(context, "{}: ", event, error); this.exceptionCaught(context, error); } } /** * Called once a bind operation is made. * * @param context the {@link ChannelHandlerContext} for which the bind operation is made * @param localAddress the {@link SocketAddress} to which it should bound * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) { this.traceOperation(context, "bind", localAddress); context.bind(localAddress, promise); } /** * Called once a close operation is made. 
* * @param context the {@link ChannelHandlerContext} for which the close operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void close(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "close"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CLOSE); } else { logger.debug("{} closed exceptionally", context); } final SslHandler sslHandler = context.pipeline().get(SslHandler.class); if (sslHandler != null) { try { sslHandler.closeOutbound(); } catch (Exception exception) { if (exception instanceof SSLException) { logger.debug( "SslException when attempting to close the outbound SSL connection: ", exception); } else { logger.warn( "Exception when attempting to close the outbound SSL connection: ", exception); throw exception; } } } context.close(promise); } /** * Called once a connect operation is made. * * @param context the {@link ChannelHandlerContext} for which the connect operation is made * @param remoteAddress the {@link SocketAddress} to which it should connect * @param localAddress the {@link SocketAddress} which is used as source on connect * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void connect( final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress, final ChannelPromise promise ) { this.traceOperation(context, "connect", remoteAddress, localAddress); context.connect(remoteAddress, localAddress, promise); } /** * Called once a deregister operation is made from the current registered {@link EventLoop}. 
* * @param context the {@link ChannelHandlerContext} for which the deregister operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "deregister"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER); } else { logger.debug("{} deregistered exceptionally", context); } context.deregister(promise); } /** * Called once a disconnect operation is made. * * @param context the {@link ChannelHandlerContext} for which the disconnect operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "disconnect"); context.disconnect(promise); } /** * Called once a flush operation is made * <p> * The flush operation will try to flush out all previous written messages that are pending. * * @param context the {@link ChannelHandlerContext} for which the flush operation is made */ @Override public void flush(final ChannelHandlerContext context) { this.traceOperation(context, "flush"); context.flush(); } /** * Intercepts {@link ChannelHandlerContext * * @param context the {@link ChannelHandlerContext} for which the read operation is made */ @Override public void read(final ChannelHandlerContext context) { this.traceOperation(context, "read"); context.read(); } /** * Called once a write operation is made * <p> * The write operation will send messages through the {@link ChannelPipeline} which are then ready to be flushed * to the actual {@link Channel}. 
This will occur when {@link Channel * * @param context the {@link ChannelHandlerContext} for which the write operation is made * @param message the message to write * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) { this.traceOperation(context, "write", message); if (message instanceof RntbdRequestRecord) { final RntbdRequestRecord record = (RntbdRequestRecord) message; record.setTimestamps(this.timestamps); if (!record.isCancelled()) { record.setSendingRequestHasStarted(); this.timestamps.channelWriteAttempted(); if (this.serverErrorInjector != null && this.serverErrorInjector.applyServerResponseErrorRule(record)) { return; } context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> { record.stage(RntbdRequestRecord.Stage.SENT); if (completed.isSuccess()) { this.timestamps.channelWriteCompleted(); } }); } return; } if (message == RntbdHealthCheckRequest.MESSAGE) { context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> { if (completed.isSuccess()) { this.timestamps.channelPingCompleted(); } }); return; } final IllegalStateException error = new IllegalStateException(lenientFormat("message of %s: %s", message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } public RntbdChannelStatistics getChannelStatistics( Channel channel, RntbdChannelAcquisitionTimeline channelAcquisitionTimeline) { return new RntbdChannelStatistics() .channelId(channel.id().toString()) .pendingRequestsCount(this.pendingRequests.size()) .channelTaskQueueSize(RntbdUtils.tryGetExecutorTaskQueueSize(channel.eventLoop())) .lastReadTime(this.timestamps.lastChannelReadTime()) .transitTimeoutCount(this.timestamps.transitTimeoutCount()) .transitTimeoutStartingTime(this.timestamps.transitTimeoutStartingTime()) 
.waitForConnectionInit(channelAcquisitionTimeline.isWaitForChannelInit()); } int pendingRequestCount() { return this.pendingRequests.size(); } Optional<RntbdContext> rntbdContext() { return Optional.of(this.contextFuture.getNow(null)); } CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() { return this.contextRequestFuture; } boolean hasRequestedRntbdContext() { return this.contextRequestFuture.getNow(null) != null; } boolean hasRntbdContext() { return this.contextFuture.getNow(null) != null; } RntbdChannelState getChannelState(final int demand) { reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued"); final int limit = this.hasRntbdContext() ? this.pendingRequestLimit : Math.min(this.pendingRequestLimit, demand); if (this.pendingRequests.size() < limit) { return RntbdChannelState.ok(this.pendingRequests.size()); } if (this.hasRntbdContext()) { return RntbdChannelState.pendingLimit(this.pendingRequests.size()); } else { return RntbdChannelState.contextNegotiationPending((this.pendingRequests.size())); } } void pendWrite(final ByteBuf out, final ChannelPromise promise) { this.pendingWrites.add(out, promise); } public Timestamps getTimestamps() { return this.timestamps; } Timestamps snapshotTimestamps() { return new Timestamps(this.timestamps); } private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) { AtomicReference<Timeout> pendingRequestTimeout = new AtomicReference<>(); this.pendingRequests.compute(record.transportRequestId(), (id, current) -> { reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", record); pendingRequestTimeout.set(record.newTimeout(timeout -> { requestExpirationExecutor.execute(record::expire); })); return record; }); record.whenComplete((response, error) -> { this.pendingRequests.remove(record.transportRequestId()); if (pendingRequestTimeout.get() != null) { 
pendingRequestTimeout.get().cancel(); } } }); return record; } private void completeAllPendingRequestsExceptionally( final ChannelHandlerContext context, final Throwable throwable) { reportIssueUnless(!this.closingExceptionally, context, "", throwable); this.closingExceptionally = true; if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) { this.pendingWrites.releaseAndFailAll(context, throwable); } if (this.rntbdConnectionStateListener != null) { this.rntbdConnectionStateListener.onException(throwable); } if (this.pendingRequests.isEmpty()) { return; } if (!this.contextRequestFuture.isDone()) { this.contextRequestFuture.completeExceptionally(throwable); } if (!this.contextFuture.isDone()) { this.contextFuture.completeExceptionally(throwable); } final int count = this.pendingRequests.size(); Exception contextRequestException = null; String phrase = null; if (this.contextRequestFuture.isCompletedExceptionally()) { try { this.contextRequestFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request write cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request write failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request write failed"; contextRequestException = new ChannelException(error); } } else if (this.contextFuture.isCompletedExceptionally()) { try { this.contextFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request read cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request read failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request read failed"; contextRequestException = new ChannelException(error); } } else { phrase = "closed exceptionally"; } final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count); final Exception cause; if 
(throwable instanceof ClosedChannelException) { cause = contextRequestException == null ? (ClosedChannelException) throwable : contextRequestException; } else { cause = throwable instanceof Exception ? (Exception) throwable : new ChannelException(throwable); } for (RntbdRequestRecord record : this.pendingRequests.values()) { final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders(); final String requestUri = record.args().physicalAddressUri().getURI().toString(); final GoneException error = new GoneException(message, cause, null, requestUri); BridgeInternal.setRequestHeaders(error, requestHeaders); record.completeExceptionally(error); } } /** * This method is called for each incoming message of type {@link RntbdResponse} to complete a request. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager request manager} belongs. * @param response the {@link RntbdResponse message} received. */ private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) { final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class); negotiator.removeInboundHandler(); negotiator.removeOutboundHandler(); if (!this.pendingWrites.isEmpty()) { this.pendingWrites.writeAndRemoveAll(context); context.flush(); } } private static void reportIssue(final Object subject, final String format, final Object... args) { RntbdReporter.reportIssue(logger, subject, format, args); } private static void reportIssueUnless( final boolean predicate, final Object subject, final String format, final Object... args ) { RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args); } private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... 
args) { logger.trace("{}\n{}\n{}", operationName, context, args); } public final static class UnhealthyChannelException extends ChannelException { public UnhealthyChannelException(String reason) { super("health check failed, reason: " + reason); } @Override public Throwable fillInStackTrace() { return this; } } }
class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler { private static final ClosedChannelException ON_CHANNEL_UNREGISTERED = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered"); private static final ClosedChannelException ON_CLOSE = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close"); private static final ClosedChannelException ON_DEREGISTER = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister"); private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory( "request-expirator", true, Thread.NORM_PRIORITY)); private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class); private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>(); private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>(); private final ChannelHealthChecker healthChecker; private final int pendingRequestLimit; private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests; private final Timestamps timestamps = new Timestamps(); private final RntbdConnectionStateListener rntbdConnectionStateListener; private final long idleConnectionTimerResolutionInNanos; private final long tcpNetworkRequestTimeoutInNanos; private final RntbdServerErrorInjector serverErrorInjector; private boolean closingExceptionally = false; private CoalescingBufferQueue pendingWrites; public RntbdRequestManager( final ChannelHealthChecker healthChecker, final int pendingRequestLimit, final RntbdConnectionStateListener connectionStateListener, final long idleConnectionTimerResolutionInNanos, final RntbdServerErrorInjector serverErrorInjector, final long tcpNetworkRequestTimeoutInNanos) { checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit); 
checkNotNull(healthChecker, "healthChecker"); this.pendingRequests = new ConcurrentHashMap<>(pendingRequestLimit); this.pendingRequestLimit = pendingRequestLimit; this.healthChecker = healthChecker; this.rntbdConnectionStateListener = connectionStateListener; this.idleConnectionTimerResolutionInNanos = idleConnectionTimerResolutionInNanos; this.tcpNetworkRequestTimeoutInNanos = tcpNetworkRequestTimeoutInNanos; this.serverErrorInjector = serverErrorInjector; } /** * Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerAdded(final ChannelHandlerContext context) { this.traceOperation(context, "handlerAdded"); } /** * Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events * anymore. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerRemoved(final ChannelHandlerContext context) { this.traceOperation(context, "handlerRemoved"); } /** * The {@link Channel} of the {@link ChannelHandlerContext} is now active * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelActive(final ChannelHandlerContext context) { this.traceOperation(context, "channelActive"); context.fireChannelActive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was registered and has reached the end of its lifetime * <p> * This method will only be called after the channel is closed. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelInactive(final ChannelHandlerContext context) { this.traceOperation(context, "channelInactive"); context.fireChannelInactive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} has read a message from its peer. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs. * @param message The message read. */ @Override public void channelRead(final ChannelHandlerContext context, final Object message) { this.traceOperation(context, "channelRead"); this.timestamps.channelReadCompleted(); try { if (message.getClass() == RntbdResponse.class) { try { this.messageReceived(context, (RntbdResponse) message); } catch (CorruptedFrameException error) { this.exceptionCaught(context, error); } catch (Throwable throwable) { reportIssue(context, "{} ", message, throwable); this.exceptionCaught(context, throwable); } } else { final IllegalStateException error = new IllegalStateException( lenientFormat("expected message of %s, not %s: %s", RntbdResponse.class, message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } } finally { if (message instanceof ReferenceCounted) { boolean released = ((ReferenceCounted) message).release(); reportIssueUnless(released, context, "failed to release message: {}", message); } } } /** * The {@link Channel} of the {@link ChannelHandlerContext} has fully consumed the most-recent message read. * <p> * If {@link ChannelOption * {@link Channel} will be made until {@link ChannelHandlerContext * for outbound messages to be written. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelReadComplete(final ChannelHandlerContext context) { this.traceOperation(context, "channelReadComplete"); context.fireChannelReadComplete(); } /** * Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until we have an {@link RntbdRequest} * <p> * This method then calls {@link ChannelHandlerContext * {@link ChannelInboundHandler} in the {@link ChannelPipeline}. * <p> * Sub-classes may override this method to change behavior. * * @param context the {@link ChannelHandlerContext} for which the bind operation is made */ @Override public void channelRegistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelRegistered"); reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites); this.pendingWrites = new CoalescingBufferQueue(context.channel()); context.fireChannelRegistered(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop} * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelUnregistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelUnregistered"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED); } else { logger.debug("{} channelUnregistered exceptionally", context); } context.fireChannelUnregistered(); } /** * Gets called once the writable state of a {@link Channel} changed. 
You can check the state with * {@link Channel * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelWritabilityChanged(final ChannelHandlerContext context) { this.traceOperation(context, "channelWritabilityChanged"); context.fireChannelWritabilityChanged(); } /** * Processes {@link ChannelHandlerContext * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param cause Exception caught */ @Override @SuppressWarnings("deprecation") public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) { this.traceOperation(context, "exceptionCaught", cause); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, cause); if (logger.isDebugEnabled()) { logger.debug("{} closing due to:", context, cause); } context.flush().close(); } } /** * Processes inbound events triggered by channel handlers in the {@link RntbdClientChannelHandler} pipeline * <p> * All but inbound request management events are ignored. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param event An object representing a user event */ @Override public void userEventTriggered(final ChannelHandlerContext context, final Object event) { this.traceOperation(context, "userEventTriggered", event); try { if (event instanceof IdleStateEvent) { if (this.healthChecker instanceof RntbdClientChannelHealthChecker) { ((RntbdClientChannelHealthChecker) this.healthChecker) .isHealthyWithFailureReason(context.channel()).addListener((Future<String> future) -> { final Throwable cause; if (future.isSuccess()) { if (RntbdConstants.RntbdHealthCheckResults.SuccessValue.equals(future.get())) { return; } cause = new UnhealthyChannelException(future.get()); } else { cause = future.cause(); } this.exceptionCaught(context, cause); }); } else { this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> { final Throwable cause; if (future.isSuccess()) { if (future.get()) { return; } cause = new UnhealthyChannelException( MessageFormat.format( "Custom ChannelHealthChecker {0} failed.", this.healthChecker.getClass().getSimpleName())); } else { cause = future.cause(); } this.exceptionCaught(context, cause); }); } return; } if (event instanceof RntbdContext) { this.contextFuture.complete((RntbdContext) event); this.removeContextNegotiatorAndFlushPendingWrites(context); this.timestamps.channelReadCompleted(); return; } if (event instanceof RntbdContextException) { this.contextFuture.completeExceptionally((RntbdContextException) event); this.exceptionCaught(context, (RntbdContextException)event); return; } if (event instanceof SslHandshakeCompletionEvent) { SslHandshakeCompletionEvent sslHandshakeCompletionEvent = (SslHandshakeCompletionEvent) event; if (sslHandshakeCompletionEvent.isSuccess()) { if (logger.isDebugEnabled()) { logger.debug("SslHandshake completed, adding idleStateHandler"); } context.pipeline().addAfter( SslHandler.class.toString(), 
IdleStateHandler.class.toString(), new IdleStateHandler( this.idleConnectionTimerResolutionInNanos, this.idleConnectionTimerResolutionInNanos, 0, TimeUnit.NANOSECONDS)); } else { if (logger.isDebugEnabled()) { logger.debug("SslHandshake failed", sslHandshakeCompletionEvent.cause()); } this.exceptionCaught(context, sslHandshakeCompletionEvent.cause()); return; } } if (event instanceof RntbdFaultInjectionConnectionResetEvent) { this.exceptionCaught(context, new IOException("Fault Injection Connection Reset")); return; } if (event instanceof RntbdFaultInjectionConnectionCloseEvent) { context.close(); return; } context.fireUserEventTriggered(event); } catch (Throwable error) { reportIssue(context, "{}: ", event, error); this.exceptionCaught(context, error); } } /** * Called once a bind operation is made. * * @param context the {@link ChannelHandlerContext} for which the bind operation is made * @param localAddress the {@link SocketAddress} to which it should bound * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) { this.traceOperation(context, "bind", localAddress); context.bind(localAddress, promise); } /** * Called once a close operation is made. 
* * @param context the {@link ChannelHandlerContext} for which the close operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void close(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "close"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CLOSE); } else { logger.debug("{} closed exceptionally", context); } final SslHandler sslHandler = context.pipeline().get(SslHandler.class); if (sslHandler != null) { try { sslHandler.closeOutbound(); } catch (Exception exception) { if (exception instanceof SSLException) { logger.debug( "SslException when attempting to close the outbound SSL connection: ", exception); } else { logger.warn( "Exception when attempting to close the outbound SSL connection: ", exception); throw exception; } } } context.close(promise); } /** * Called once a connect operation is made. * * @param context the {@link ChannelHandlerContext} for which the connect operation is made * @param remoteAddress the {@link SocketAddress} to which it should connect * @param localAddress the {@link SocketAddress} which is used as source on connect * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void connect( final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress, final ChannelPromise promise ) { this.traceOperation(context, "connect", remoteAddress, localAddress); context.connect(remoteAddress, localAddress, promise); } /** * Called once a deregister operation is made from the current registered {@link EventLoop}. 
* * @param context the {@link ChannelHandlerContext} for which the deregister operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "deregister"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER); } else { logger.debug("{} deregistered exceptionally", context); } context.deregister(promise); } /** * Called once a disconnect operation is made. * * @param context the {@link ChannelHandlerContext} for which the disconnect operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "disconnect"); context.disconnect(promise); } /** * Called once a flush operation is made * <p> * The flush operation will try to flush out all previous written messages that are pending. * * @param context the {@link ChannelHandlerContext} for which the flush operation is made */ @Override public void flush(final ChannelHandlerContext context) { this.traceOperation(context, "flush"); context.flush(); } /** * Intercepts {@link ChannelHandlerContext * * @param context the {@link ChannelHandlerContext} for which the read operation is made */ @Override public void read(final ChannelHandlerContext context) { this.traceOperation(context, "read"); context.read(); } /** * Called once a write operation is made * <p> * The write operation will send messages through the {@link ChannelPipeline} which are then ready to be flushed * to the actual {@link Channel}. 
This will occur when {@link Channel * * @param context the {@link ChannelHandlerContext} for which the write operation is made * @param message the message to write * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) { this.traceOperation(context, "write", message); if (message instanceof RntbdRequestRecord) { final RntbdRequestRecord record = (RntbdRequestRecord) message; record.setTimestamps(this.timestamps); if (!record.isCancelled()) { record.setSendingRequestHasStarted(); this.timestamps.channelWriteAttempted(); if (this.serverErrorInjector != null) { if (this.serverErrorInjector.injectRntbdServerResponseError(record)) { return; } Consumer<Duration> writeRequestWithInjectedDelayConsumer = (delay) -> this.writeRequestWithInjectedDelay(context, record, promise, delay); if (this.serverErrorInjector.injectRntbdServerResponseDelay(record, writeRequestWithInjectedDelayConsumer)) { return; } } context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> { record.stage(RntbdRequestRecord.Stage.SENT); if (completed.isSuccess()) { this.timestamps.channelWriteCompleted(); } }); } return; } if (message == RntbdHealthCheckRequest.MESSAGE) { context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> { if (completed.isSuccess()) { this.timestamps.channelPingCompleted(); } }); return; } final IllegalStateException error = new IllegalStateException(lenientFormat("message of %s: %s", message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } private void writeRequestWithInjectedDelay( final ChannelHandlerContext context, final RntbdRequestRecord rntbdRequestRecord, final ChannelPromise promise, Duration delay) { this.addPendingRequestRecord(context, rntbdRequestRecord); rntbdRequestRecord.stage(RntbdRequestRecord.Stage.SENT); 
this.timestamps.channelWriteCompleted(); long effectiveDelayInNanos = Math.min(this.tcpNetworkRequestTimeoutInNanos, delay.toNanos()); context.executor().schedule( () -> { if (this.tcpNetworkRequestTimeoutInNanos <= delay.toNanos()) { return; } context.write(rntbdRequestRecord, promise); }, effectiveDelayInNanos, TimeUnit.NANOSECONDS ); } public RntbdChannelStatistics getChannelStatistics( Channel channel, RntbdChannelAcquisitionTimeline channelAcquisitionTimeline) { return new RntbdChannelStatistics() .channelId(channel.id().toString()) .pendingRequestsCount(this.pendingRequests.size()) .channelTaskQueueSize(RntbdUtils.tryGetExecutorTaskQueueSize(channel.eventLoop())) .lastReadTime(this.timestamps.lastChannelReadTime()) .transitTimeoutCount(this.timestamps.transitTimeoutCount()) .transitTimeoutStartingTime(this.timestamps.transitTimeoutStartingTime()) .waitForConnectionInit(channelAcquisitionTimeline.isWaitForChannelInit()); } int pendingRequestCount() { return this.pendingRequests.size(); } Optional<RntbdContext> rntbdContext() { return Optional.of(this.contextFuture.getNow(null)); } CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() { return this.contextRequestFuture; } boolean hasRequestedRntbdContext() { return this.contextRequestFuture.getNow(null) != null; } boolean hasRntbdContext() { return this.contextFuture.getNow(null) != null; } RntbdChannelState getChannelState(final int demand) { reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued"); final int limit = this.hasRntbdContext() ? 
this.pendingRequestLimit : Math.min(this.pendingRequestLimit, demand); if (this.pendingRequests.size() < limit) { return RntbdChannelState.ok(this.pendingRequests.size()); } if (this.hasRntbdContext()) { return RntbdChannelState.pendingLimit(this.pendingRequests.size()); } else { return RntbdChannelState.contextNegotiationPending((this.pendingRequests.size())); } } void pendWrite(final ByteBuf out, final ChannelPromise promise) { this.pendingWrites.add(out, promise); } public Timestamps getTimestamps() { return this.timestamps; } Timestamps snapshotTimestamps() { return new Timestamps(this.timestamps); } private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) { AtomicReference<Timeout> pendingRequestTimeout = new AtomicReference<>(); this.pendingRequests.compute(record.transportRequestId(), (id, current) -> { reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", record); pendingRequestTimeout.set(record.newTimeout(timeout -> { requestExpirationExecutor.execute(record::expire); })); return record; }); record.whenComplete((response, error) -> { this.pendingRequests.remove(record.transportRequestId()); if (pendingRequestTimeout.get() != null) { pendingRequestTimeout.get().cancel(); } }); return record; } private void completeAllPendingRequestsExceptionally( final ChannelHandlerContext context, final Throwable throwable) { reportIssueUnless(!this.closingExceptionally, context, "", throwable); this.closingExceptionally = true; if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) { this.pendingWrites.releaseAndFailAll(context, throwable); } if (this.rntbdConnectionStateListener != null) { this.rntbdConnectionStateListener.onException(throwable); } if (this.pendingRequests.isEmpty()) { return; } if (!this.contextRequestFuture.isDone()) { this.contextRequestFuture.completeExceptionally(throwable); } if (!this.contextFuture.isDone()) { 
this.contextFuture.completeExceptionally(throwable); } final int count = this.pendingRequests.size(); Exception contextRequestException = null; String phrase = null; if (this.contextRequestFuture.isCompletedExceptionally()) { try { this.contextRequestFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request write cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request write failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request write failed"; contextRequestException = new ChannelException(error); } } else if (this.contextFuture.isCompletedExceptionally()) { try { this.contextFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request read cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request read failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request read failed"; contextRequestException = new ChannelException(error); } } else { phrase = "closed exceptionally"; } final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count); final Exception cause; if (throwable instanceof ClosedChannelException) { cause = contextRequestException == null ? (ClosedChannelException) throwable : contextRequestException; } else { cause = throwable instanceof Exception ? 
(Exception) throwable : new ChannelException(throwable); } for (RntbdRequestRecord record : this.pendingRequests.values()) { final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders(); final String requestUri = record.args().physicalAddressUri().getURI().toString(); final GoneException error = new GoneException(message, cause, null, requestUri); BridgeInternal.setRequestHeaders(error, requestHeaders); record.completeExceptionally(error); } } /** * This method is called for each incoming message of type {@link RntbdResponse} to complete a request. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager request manager} belongs. * @param response the {@link RntbdResponse message} received. */ private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) { final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class); negotiator.removeInboundHandler(); negotiator.removeOutboundHandler(); if (!this.pendingWrites.isEmpty()) { this.pendingWrites.writeAndRemoveAll(context); context.flush(); } } private static void reportIssue(final Object subject, final String format, final Object... args) { RntbdReporter.reportIssue(logger, subject, format, args); } private static void reportIssueUnless( final boolean predicate, final Object subject, final String format, final Object... args ) { RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args); } private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... args) { logger.trace("{}\n{}\n{}", operationName, context, args); } public final static class UnhealthyChannelException extends ChannelException { public UnhealthyChannelException(String reason) { super("health check failed, reason: " + reason); } @Override public Throwable fillInStackTrace() { return this; } } }
Changed to inject delay during write
private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) { final Long transportRequestId = response.getTransportRequestId(); if (transportRequestId == null) { reportIssue(context, "response ignored because its transportRequestId is missing: {}", response); return; } final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId); if (requestRecord == null) { logger.debug("response {} ignored because its requestRecord is missing: {}", transportRequestId, response); return; } requestRecord.stage(RntbdRequestRecord.Stage.DECODE_STARTED, response.getDecodeStartTime()); requestRecord.stage( RntbdRequestRecord.Stage.RECEIVED, response.getDecodeEndTime() != null ? response.getDecodeEndTime() : Instant.now()); requestRecord.responseLength(response.getMessageLength()); final HttpResponseStatus status = response.getStatus(); final UUID activityId = response.getActivityId(); final int statusCode = status.code(); if ((HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code()) || statusCode == HttpResponseStatus.NOT_MODIFIED.code()) { final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null)); if (this.serverErrorInjector != null && this.serverErrorInjector.applyServerResponseLatencyRule(requestRecord, storeResponse)) { return; } requestRecord.complete(storeResponse); } else { final CosmosException cause; final long lsn = response.getHeader(RntbdResponseHeader.LSN); final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId); final CosmosError error = response.hasPayload() ? 
new CosmosError(RntbdObjectMapper.readTree(response)) : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name()); final Map<String, String> responseHeaders = response.getHeaders().asMap( this.rntbdContext().orElseThrow(IllegalStateException::new), activityId ); final String resourceAddress = requestRecord.args().physicalAddressUri() != null ? requestRecord.args().physicalAddressUri().getURI().toString() : null; switch (status.code()) { case StatusCodes.BADREQUEST: cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.CONFLICT: cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.FORBIDDEN: cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.GONE: final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus)); switch (subStatusCode) { case SubStatusCodes.COMPLETING_SPLIT_OR_MERGE: cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.COMPLETING_PARTITION_MIGRATION: cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.NAME_CACHE_IS_STALE: cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.PARTITION_KEY_RANGE_GONE: cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: GoneException goneExceptionFromService = new GoneException(error, lsn, partitionKeyRangeId, responseHeaders); goneExceptionFromService.setIsBasedOn410ResponseFromService(); cause = goneExceptionFromService; break; } break; case StatusCodes.INTERNAL_SERVER_ERROR: cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.LOCKED: cause = new LockedException(error, lsn, 
partitionKeyRangeId, responseHeaders); break; case StatusCodes.METHOD_NOT_ALLOWED: cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.NOTFOUND: cause = new NotFoundException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.PRECONDITION_FAILED: cause = new PreconditionFailedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_ENTITY_TOO_LARGE: cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_TIMEOUT: Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders); cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders, inner); break; case StatusCodes.RETRY_WITH: cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.SERVICE_UNAVAILABLE: cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.TOO_MANY_REQUESTS: cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.UNAUTHORIZED: cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = BridgeInternal.createCosmosException(resourceAddress, status.code(), error, responseHeaders); break; } BridgeInternal.setResourceAddress(cause, resourceAddress); if (this.serverErrorInjector != null && this.serverErrorInjector.applyServerResponseLatencyRule(requestRecord, cause)) { return; } requestRecord.completeExceptionally(cause); } }
if (this.serverErrorInjector != null
private void messageReceived(final ChannelHandlerContext context, final RntbdResponse response) { final Long transportRequestId = response.getTransportRequestId(); if (transportRequestId == null) { reportIssue(context, "response ignored because its transportRequestId is missing: {}", response); return; } final RntbdRequestRecord requestRecord = this.pendingRequests.get(transportRequestId); if (requestRecord == null) { logger.debug("response {} ignored because its requestRecord is missing: {}", transportRequestId, response); return; } requestRecord.stage(RntbdRequestRecord.Stage.DECODE_STARTED, response.getDecodeStartTime()); requestRecord.stage( RntbdRequestRecord.Stage.RECEIVED, response.getDecodeEndTime() != null ? response.getDecodeEndTime() : Instant.now()); requestRecord.responseLength(response.getMessageLength()); final HttpResponseStatus status = response.getStatus(); final UUID activityId = response.getActivityId(); final int statusCode = status.code(); if ((HttpResponseStatus.OK.code() <= statusCode && statusCode < HttpResponseStatus.MULTIPLE_CHOICES.code()) || statusCode == HttpResponseStatus.NOT_MODIFIED.code()) { final StoreResponse storeResponse = response.toStoreResponse(this.contextFuture.getNow(null)); requestRecord.complete(storeResponse); } else { final CosmosException cause; final long lsn = response.getHeader(RntbdResponseHeader.LSN); final String partitionKeyRangeId = response.getHeader(RntbdResponseHeader.PartitionKeyRangeId); final CosmosError error = response.hasPayload() ? new CosmosError(RntbdObjectMapper.readTree(response)) : new CosmosError(Integer.toString(statusCode), status.reasonPhrase(), status.codeClass().name()); final Map<String, String> responseHeaders = response.getHeaders().asMap( this.rntbdContext().orElseThrow(IllegalStateException::new), activityId ); final String resourceAddress = requestRecord.args().physicalAddressUri() != null ? 
requestRecord.args().physicalAddressUri().getURI().toString() : null; switch (status.code()) { case StatusCodes.BADREQUEST: cause = new BadRequestException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.CONFLICT: cause = new ConflictException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.FORBIDDEN: cause = new ForbiddenException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.GONE: final int subStatusCode = Math.toIntExact(response.getHeader(RntbdResponseHeader.SubStatus)); switch (subStatusCode) { case SubStatusCodes.COMPLETING_SPLIT_OR_MERGE: cause = new PartitionKeyRangeIsSplittingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.COMPLETING_PARTITION_MIGRATION: cause = new PartitionIsMigratingException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.NAME_CACHE_IS_STALE: cause = new InvalidPartitionException(error, lsn, partitionKeyRangeId, responseHeaders); break; case SubStatusCodes.PARTITION_KEY_RANGE_GONE: cause = new PartitionKeyRangeGoneException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: GoneException goneExceptionFromService = new GoneException(error, lsn, partitionKeyRangeId, responseHeaders); goneExceptionFromService.setIsBasedOn410ResponseFromService(); cause = goneExceptionFromService; break; } break; case StatusCodes.INTERNAL_SERVER_ERROR: cause = new InternalServerErrorException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.LOCKED: cause = new LockedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.METHOD_NOT_ALLOWED: cause = new MethodNotAllowedException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.NOTFOUND: cause = new NotFoundException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.PRECONDITION_FAILED: cause = new PreconditionFailedException(error, lsn, 
partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_ENTITY_TOO_LARGE: cause = new RequestEntityTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.REQUEST_TIMEOUT: Exception inner = new RequestTimeoutException(error, lsn, partitionKeyRangeId, responseHeaders); cause = new GoneException(resourceAddress, error, lsn, partitionKeyRangeId, responseHeaders, inner); break; case StatusCodes.RETRY_WITH: cause = new RetryWithException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.SERVICE_UNAVAILABLE: cause = new ServiceUnavailableException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.TOO_MANY_REQUESTS: cause = new RequestRateTooLargeException(error, lsn, partitionKeyRangeId, responseHeaders); break; case StatusCodes.UNAUTHORIZED: cause = new UnauthorizedException(error, lsn, partitionKeyRangeId, responseHeaders); break; default: cause = BridgeInternal.createCosmosException(resourceAddress, status.code(), error, responseHeaders); break; } BridgeInternal.setResourceAddress(cause, resourceAddress); requestRecord.completeExceptionally(cause); } }
/**
 * Manages the lifecycle of RNTBD requests on a single Netty {@link Channel}.
 * <p>
 * This duplex handler tracks every in-flight {@link RntbdRequestRecord} keyed by transport request id,
 * negotiates the {@link RntbdContext} before releasing buffered writes, expires requests on a shared
 * timer, performs idle-state health checks, and fails all pending requests when the channel closes,
 * deregisters, or raises an exception.
 * <p>
 * NOTE(review): not thread-safe in general; Netty guarantees handler callbacks run on the channel's
 * event loop, which is what makes the unsynchronized {@code closingExceptionally}/{@code pendingWrites}
 * access safe — confirm no method here is invoked off the event loop.
 */
class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler {

    // Pre-allocated exceptions with suppressed stack traces; used to fail pending requests cheaply
    // when the channel is torn down through the corresponding lifecycle event.
    private static final ClosedChannelException ON_CHANNEL_UNREGISTERED =
        ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered");

    private static final ClosedChannelException ON_CLOSE =
        ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close");

    private static final ClosedChannelException ON_DEREGISTER =
        ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister");

    // Single shared executor on which request-expiration callbacks run, so expiration work never
    // blocks a channel's event loop.
    private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory(
        "request-expirator",
        true,
        Thread.NORM_PRIORITY));

    private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class);

    // Completed when the server's RntbdContext response (or an error) arrives.
    private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>();
    // Completed when the client's RntbdContext request has been written.
    private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>();
    private final ChannelHealthChecker healthChecker;
    private final int pendingRequestLimit;
    private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests;
    private final Timestamps timestamps = new Timestamps();
    private final RntbdConnectionStateListener rntbdConnectionStateListener;
    private final long idleConnectionTimerResolutionInNanos;
    private final RntbdServerErrorInjector serverErrorInjector;

    // True once completeAllPendingRequestsExceptionally has run; prevents double-failing requests.
    private boolean closingExceptionally = false;
    // Buffers encoded writes issued before the RntbdContext handshake completes; created on
    // channelRegistered, drained in removeContextNegotiatorAndFlushPendingWrites.
    private CoalescingBufferQueue pendingWrites;

    /**
     * Initializes a new request manager for a single channel.
     *
     * @param healthChecker checker consulted on idle-state events to decide whether to keep the channel.
     * @param pendingRequestLimit maximum number of in-flight requests allowed on this channel; must be positive.
     * @param connectionStateListener optional listener notified when the channel fails exceptionally; may be null.
     * @param idleConnectionTimerResolutionInNanos read/write idle thresholds for the {@link IdleStateHandler}
     *        installed after the SSL handshake completes.
     * @param serverErrorInjector optional fault-injection hook applied before writes; may be null.
     */
    public RntbdRequestManager(
        final ChannelHealthChecker healthChecker,
        final int pendingRequestLimit,
        final RntbdConnectionStateListener connectionStateListener,
        final long idleConnectionTimerResolutionInNanos,
        final RntbdServerErrorInjector serverErrorInjector) {

        checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit);
        checkNotNull(healthChecker, "healthChecker");

        this.pendingRequests = new ConcurrentHashMap<>(pendingRequestLimit);
        this.pendingRequestLimit = pendingRequestLimit;
        this.healthChecker = healthChecker;
        this.rntbdConnectionStateListener = connectionStateListener;
        this.idleConnectionTimerResolutionInNanos = idleConnectionTimerResolutionInNanos;
        this.serverErrorInjector = serverErrorInjector;
    }

    /**
     * Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void handlerAdded(final ChannelHandlerContext context) {
        this.traceOperation(context, "handlerAdded");
    }

    /**
     * Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events
     * anymore.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void handlerRemoved(final ChannelHandlerContext context) {
        this.traceOperation(context, "handlerRemoved");
    }

    /**
     * The {@link Channel} of the {@link ChannelHandlerContext} is now active.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void channelActive(final ChannelHandlerContext context) {
        this.traceOperation(context, "channelActive");
        context.fireChannelActive();
    }

    /**
     * The {@link Channel} of the {@link ChannelHandlerContext} is no longer active and has reached the end of its
     * lifetime. This method will only be called after the channel is closed.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void channelInactive(final ChannelHandlerContext context) {
        this.traceOperation(context, "channelInactive");
        context.fireChannelInactive();
    }

    /**
     * The {@link Channel} of the {@link ChannelHandlerContext} has read a message from its peer.
     * <p>
     * Only {@link RntbdResponse} messages are expected here; anything else is reported as an issue and the
     * channel is failed. The message's reference count is always released before returning.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs.
     * @param message The message read.
     */
    @Override
    public void channelRead(final ChannelHandlerContext context, final Object message) {
        this.traceOperation(context, "channelRead");
        this.timestamps.channelReadCompleted();
        try {
            if (message.getClass() == RntbdResponse.class) {
                try {
                    // messageReceived completes the matching pending request; defined elsewhere in this class.
                    this.messageReceived(context, (RntbdResponse) message);
                } catch (CorruptedFrameException error) {
                    this.exceptionCaught(context, error);
                } catch (Throwable throwable) {
                    reportIssue(context, "{} ", message, throwable);
                    this.exceptionCaught(context, throwable);
                }
            } else {
                final IllegalStateException error = new IllegalStateException(
                    lenientFormat("expected message of %s, not %s: %s",
                        RntbdResponse.class,
                        message.getClass(),
                        message));
                reportIssue(context, "", error);
                this.exceptionCaught(context, error);
            }
        } finally {
            if (message instanceof ReferenceCounted) {
                boolean released = ((ReferenceCounted) message).release();
                reportIssueUnless(released, context, "failed to release message: {}", message);
            }
        }
    }

    /**
     * The {@link Channel} of the {@link ChannelHandlerContext} has fully consumed the most-recent message read.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void channelReadComplete(final ChannelHandlerContext context) {
        this.traceOperation(context, "channelReadComplete");
        context.fireChannelReadComplete();
    }

    /**
     * Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until we have an
     * {@link RntbdContext}, then forwards the event to the next {@link ChannelInboundHandler} in the
     * {@link ChannelPipeline}.
     *
     * @param context the {@link ChannelHandlerContext} for which the registration event fired
     */
    @Override
    public void channelRegistered(final ChannelHandlerContext context) {
        this.traceOperation(context, "channelRegistered");
        reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites);
        this.pendingWrites = new CoalescingBufferQueue(context.channel());
        context.fireChannelRegistered();
    }

    /**
     * The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop}.
     * Fails any still-pending requests unless they were already failed by an earlier teardown path.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void channelUnregistered(final ChannelHandlerContext context) {
        this.traceOperation(context, "channelUnregistered");
        if (!this.closingExceptionally) {
            this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED);
        } else {
            logger.debug("{} channelUnregistered exceptionally", context);
        }
        context.fireChannelUnregistered();
    }

    /**
     * Gets called once the writable state of a {@link Channel} changed.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    @Override
    public void channelWritabilityChanged(final ChannelHandlerContext context) {
        this.traceOperation(context, "channelWritabilityChanged");
        context.fireChannelWritabilityChanged();
    }

    /**
     * Processes exceptions raised by handlers in the pipeline: fails all pending requests with the cause,
     * then flushes and closes the channel. Subsequent teardown callbacks become no-ops via
     * {@code closingExceptionally}.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     * @param cause Exception caught
     */
    @Override
    @SuppressWarnings("deprecation")
    public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) {
        this.traceOperation(context, "exceptionCaught", cause);
        if (!this.closingExceptionally) {
            this.completeAllPendingRequestsExceptionally(context, cause);
            if (logger.isDebugEnabled()) {
                // Two args for one placeholder is intentional: SLF4J treats the trailing Throwable
                // as the logged exception, not as a format argument.
                logger.debug("{} closing due to:", context, cause);
            }
            context.flush().close();
        }
    }

    /**
     * Processes inbound user events triggered by channel handlers in the {@link RntbdClientChannelHandler}
     * pipeline: idle-state health checks, RNTBD context negotiation results, SSL handshake completion
     * (which installs the {@link IdleStateHandler}), and fault-injection events. All other events are
     * forwarded down the pipeline.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     * @param event An object representing a user event
     */
    @Override
    public void userEventTriggered(final ChannelHandlerContext context, final Object event) {
        this.traceOperation(context, "userEventTriggered", event);
        try {
            if (event instanceof IdleStateEvent) {
                if (this.healthChecker instanceof RntbdClientChannelHealthChecker) {
                    ((RntbdClientChannelHealthChecker) this.healthChecker)
                        .isHealthyWithFailureReason(context.channel()).addListener((Future<String> future) -> {
                            final Throwable cause;
                            if (future.isSuccess()) {
                                if (RntbdConstants.RntbdHealthCheckResults.SuccessValue.equals(future.get())) {
                                    return;
                                }
                                cause = new UnhealthyChannelException(future.get());
                            } else {
                                cause = future.cause();
                            }
                            this.exceptionCaught(context, cause);
                        });
                } else {
                    this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> {
                        final Throwable cause;
                        if (future.isSuccess()) {
                            if (future.get()) {
                                return;
                            }
                            cause = new UnhealthyChannelException(
                                MessageFormat.format(
                                    "Custom ChannelHealthChecker {0} failed.",
                                    this.healthChecker.getClass().getSimpleName()));
                        } else {
                            cause = future.cause();
                        }
                        this.exceptionCaught(context, cause);
                    });
                }
                return;
            }
            if (event instanceof RntbdContext) {
                this.contextFuture.complete((RntbdContext) event);
                this.removeContextNegotiatorAndFlushPendingWrites(context);
                this.timestamps.channelReadCompleted();
                return;
            }
            if (event instanceof RntbdContextException) {
                this.contextFuture.completeExceptionally((RntbdContextException) event);
                this.exceptionCaught(context, (RntbdContextException) event);
                return;
            }
            if (event instanceof SslHandshakeCompletionEvent) {
                SslHandshakeCompletionEvent sslHandshakeCompletionEvent = (SslHandshakeCompletionEvent) event;
                if (sslHandshakeCompletionEvent.isSuccess()) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("SslHandshake completed, adding idleStateHandler");
                    }
                    // Idle detection starts only after the handshake succeeds; write-idle and read-idle
                    // share the same resolution, all-idle is disabled (0).
                    context.pipeline().addAfter(
                        SslHandler.class.toString(),
                        IdleStateHandler.class.toString(),
                        new IdleStateHandler(
                            this.idleConnectionTimerResolutionInNanos,
                            this.idleConnectionTimerResolutionInNanos,
                            0,
                            TimeUnit.NANOSECONDS));
                } else {
                    if (logger.isDebugEnabled()) {
                        logger.debug("SslHandshake failed", sslHandshakeCompletionEvent.cause());
                    }
                    this.exceptionCaught(context, sslHandshakeCompletionEvent.cause());
                    return;
                }
            }
            if (event instanceof RntbdFaultInjectionConnectionResetEvent) {
                this.exceptionCaught(context, new IOException("Fault Injection Connection Reset"));
                return;
            }
            if (event instanceof RntbdFaultInjectionConnectionCloseEvent) {
                context.close();
                return;
            }
            context.fireUserEventTriggered(event);
        } catch (Throwable error) {
            reportIssue(context, "{}: ", event, error);
            this.exceptionCaught(context, error);
        }
    }

    /**
     * Called once a bind operation is made.
     *
     * @param context the {@link ChannelHandlerContext} for which the bind operation is made
     * @param localAddress the {@link SocketAddress} to which it should bound
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     */
    @Override
    public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) {
        this.traceOperation(context, "bind", localAddress);
        context.bind(localAddress, promise);
    }

    /**
     * Called once a close operation is made. Fails pending requests (unless already failed), attempts a
     * graceful outbound SSL close, then propagates the close.
     *
     * @param context the {@link ChannelHandlerContext} for which the close operation is made
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     */
    @Override
    public void close(final ChannelHandlerContext context, final ChannelPromise promise) {
        this.traceOperation(context, "close");
        if (!this.closingExceptionally) {
            this.completeAllPendingRequestsExceptionally(context, ON_CLOSE);
        } else {
            logger.debug("{} closed exceptionally", context);
        }
        final SslHandler sslHandler = context.pipeline().get(SslHandler.class);
        if (sslHandler != null) {
            try {
                // Send the close_notify alert; best-effort, an SSLException here is expected noise.
                sslHandler.closeOutbound();
            } catch (Exception exception) {
                if (exception instanceof SSLException) {
                    logger.debug(
                        "SslException when attempting to close the outbound SSL connection: ",
                        exception);
                } else {
                    logger.warn(
                        "Exception when attempting to close the outbound SSL connection: ",
                        exception);
                    throw exception;
                }
            }
        }
        context.close(promise);
    }

    /**
     * Called once a connect operation is made.
     *
     * @param context the {@link ChannelHandlerContext} for which the connect operation is made
     * @param remoteAddress the {@link SocketAddress} to which it should connect
     * @param localAddress the {@link SocketAddress} which is used as source on connect
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     */
    @Override
    public void connect(
        final ChannelHandlerContext context,
        final SocketAddress remoteAddress,
        final SocketAddress localAddress,
        final ChannelPromise promise
    ) {
        this.traceOperation(context, "connect", remoteAddress, localAddress);
        context.connect(remoteAddress, localAddress, promise);
    }

    /**
     * Called once a deregister operation is made from the current registered {@link EventLoop}.
     *
     * @param context the {@link ChannelHandlerContext} for which the deregister operation is made
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     */
    @Override
    public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) {
        this.traceOperation(context, "deregister");
        if (!this.closingExceptionally) {
            this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER);
        } else {
            logger.debug("{} deregistered exceptionally", context);
        }
        context.deregister(promise);
    }

    /**
     * Called once a disconnect operation is made.
     *
     * @param context the {@link ChannelHandlerContext} for which the disconnect operation is made
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     */
    @Override
    public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) {
        this.traceOperation(context, "disconnect");
        context.disconnect(promise);
    }

    /**
     * Called once a flush operation is made. The flush operation will try to flush out all previous written
     * messages that are pending.
     *
     * @param context the {@link ChannelHandlerContext} for which the flush operation is made
     */
    @Override
    public void flush(final ChannelHandlerContext context) {
        this.traceOperation(context, "flush");
        context.flush();
    }

    /**
     * Intercepts read requests and forwards them down the pipeline.
     *
     * @param context the {@link ChannelHandlerContext} for which the read operation is made
     */
    @Override
    public void read(final ChannelHandlerContext context) {
        this.traceOperation(context, "read");
        context.read();
    }

    /**
     * Called once a write operation is made.
     * <p>
     * {@link RntbdRequestRecord} messages are registered as pending (with an expiration timeout) before
     * being written; health-check pings are written directly; any other message type is an error. Writes
     * may be short-circuited by the fault-injection hook.
     *
     * @param context the {@link ChannelHandlerContext} for which the write operation is made
     * @param message the message to write
     * @param promise the {@link ChannelPromise} to notify once the operation completes
     */
    @Override
    public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) {
        this.traceOperation(context, "write", message);
        if (message instanceof RntbdRequestRecord) {
            final RntbdRequestRecord record = (RntbdRequestRecord) message;
            record.setTimestamps(this.timestamps);
            if (!record.isCancelled()) {
                record.setSendingRequestHasStarted();
                this.timestamps.channelWriteAttempted();
                if (this.serverErrorInjector != null
                    && this.serverErrorInjector.applyServerResponseErrorRule(record)) {
                    // Fault injection produced the response; skip the real network write.
                    return;
                }
                context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> {
                    record.stage(RntbdRequestRecord.Stage.SENT);
                    if (completed.isSuccess()) {
                        this.timestamps.channelWriteCompleted();
                    }
                });
            }
            return;
        }
        if (message == RntbdHealthCheckRequest.MESSAGE) {
            context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> {
                if (completed.isSuccess()) {
                    this.timestamps.channelPingCompleted();
                }
            });
            return;
        }
        final IllegalStateException error = new IllegalStateException(lenientFormat("message of %s: %s",
            message.getClass(),
            message));
        reportIssue(context, "", error);
        this.exceptionCaught(context, error);
    }

    /**
     * Builds a point-in-time statistics snapshot for the given channel.
     *
     * @param channel the channel this manager serves
     * @param channelAcquisitionTimeline acquisition timeline used to report whether a caller is waiting
     *        on channel initialization
     * @return a populated {@link RntbdChannelStatistics}
     */
    public RntbdChannelStatistics getChannelStatistics(
        Channel channel,
        RntbdChannelAcquisitionTimeline channelAcquisitionTimeline) {
        return new RntbdChannelStatistics()
            .channelId(channel.id().toString())
            .pendingRequestsCount(this.pendingRequests.size())
            .channelTaskQueueSize(RntbdUtils.tryGetExecutorTaskQueueSize(channel.eventLoop()))
            .lastReadTime(this.timestamps.lastChannelReadTime())
            .transitTimeoutCount(this.timestamps.transitTimeoutCount())
            .transitTimeoutStartingTime(this.timestamps.transitTimeoutStartingTime())
            .waitForConnectionInit(channelAcquisitionTimeline.isWaitForChannelInit());
    }

    /** Returns the number of in-flight requests on this channel. */
    int pendingRequestCount() {
        return this.pendingRequests.size();
    }

    /**
     * Returns the negotiated {@link RntbdContext}, or {@link Optional
     * completed yet.
     */
    Optional<RntbdContext> rntbdContext() {
        // FIX: Optional.of(null) throws NullPointerException whenever the context has not yet been
        // received; ofNullable yields Optional.empty() instead, matching the Optional return contract.
        return Optional.ofNullable(this.contextFuture.getNow(null));
    }

    /** Returns the future tracking the write of the client's context request. */
    CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() {
        return this.contextRequestFuture;
    }

    /** True once the client's RNTBD context request has been issued. */
    boolean hasRequestedRntbdContext() {
        return this.contextRequestFuture.getNow(null) != null;
    }

    /** True once the server's RNTBD context response has been received. */
    boolean hasRntbdContext() {
        return this.contextFuture.getNow(null) != null;
    }

    /**
     * Classifies the channel's capacity for additional requests.
     * <p>
     * Before context negotiation completes the effective limit is capped by {@code demand} so a single
     * un-negotiated channel does not absorb the whole pipeline.
     *
     * @param demand number of requests the caller wants to place
     * @return an {@link RntbdChannelState} describing whether more requests may be written
     */
    RntbdChannelState getChannelState(final int demand) {
        reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued");
        final int limit = this.hasRntbdContext()
            ? this.pendingRequestLimit
            : Math.min(this.pendingRequestLimit, demand);
        if (this.pendingRequests.size() < limit) {
            return RntbdChannelState.ok(this.pendingRequests.size());
        }
        if (this.hasRntbdContext()) {
            return RntbdChannelState.pendingLimit(this.pendingRequests.size());
        } else {
            return RntbdChannelState.contextNegotiationPending(this.pendingRequests.size());
        }
    }

    /** Buffers an encoded request until the RNTBD context handshake completes. */
    void pendWrite(final ByteBuf out, final ChannelPromise promise) {
        this.pendingWrites.add(out, promise);
    }

    /** Returns the live (mutable) timestamps for this channel. */
    public Timestamps getTimestamps() {
        return this.timestamps;
    }

    /** Returns an immutable copy of the current timestamps. */
    Timestamps snapshotTimestamps() {
        return new Timestamps(this.timestamps);
    }

    /**
     * Registers {@code record} as pending, arms its expiration timeout, and wires removal/cancellation
     * on completion. Returns the same record for fluent use by {@link
     *
     * @param context the channel handler context, used for issue reporting
     * @param record the request record to track
     * @return {@code record}
     */
    private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) {
        AtomicReference<Timeout> pendingRequestTimeout = new AtomicReference<>();
        this.pendingRequests.compute(record.transportRequestId(), (id, current) -> {
            // A non-null current entry means a transport request id collision.
            // FIX: the format string has three placeholders but only `record` was supplied, so the
            // id and colliding record were never logged; pass all three arguments.
            reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", id, current, record);
            pendingRequestTimeout.set(record.newTimeout(timeout -> {
                // Expire off the event loop on the shared expiration executor.
                requestExpirationExecutor.execute(record::expire);
            }));
            return record;
        });
        record.whenComplete((response, error) -> {
            this.pendingRequests.remove(record.transportRequestId());
            if (pendingRequestTimeout.get() != null) {
                pendingRequestTimeout.get().cancel();
            }
        });
        return record;
    }

    /**
     * Fails every pending write and request exactly once with a {@link GoneException} derived from
     * {@code throwable}, notifies the connection-state listener, and completes the context futures
     * exceptionally if negotiation never finished. The failure message records whether the context
     * request write or read failed.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     * @param throwable the reason the channel is being torn down
     */
    private void completeAllPendingRequestsExceptionally(
        final ChannelHandlerContext context,
        final Throwable throwable) {

        reportIssueUnless(!this.closingExceptionally, context, "", throwable);
        this.closingExceptionally = true;

        if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) {
            this.pendingWrites.releaseAndFailAll(context, throwable);
        }
        if (this.rntbdConnectionStateListener != null) {
            this.rntbdConnectionStateListener.onException(throwable);
        }
        if (this.pendingRequests.isEmpty()) {
            return;
        }
        if (!this.contextRequestFuture.isDone()) {
            this.contextRequestFuture.completeExceptionally(throwable);
        }
        if (!this.contextFuture.isDone()) {
            this.contextFuture.completeExceptionally(throwable);
        }

        final int count = this.pendingRequests.size();
        Exception contextRequestException = null;
        String phrase = null;

        if (this.contextRequestFuture.isCompletedExceptionally()) {
            try {
                this.contextRequestFuture.get();
            } catch (final CancellationException error) {
                phrase = "RNTBD context request write cancelled";
                contextRequestException = error;
            } catch (final Exception error) {
                phrase = "RNTBD context request write failed";
                contextRequestException = error;
            } catch (final Throwable error) {
                phrase = "RNTBD context request write failed";
                contextRequestException = new ChannelException(error);
            }
        } else if (this.contextFuture.isCompletedExceptionally()) {
            try {
                this.contextFuture.get();
            } catch (final CancellationException error) {
                phrase = "RNTBD context request read cancelled";
                contextRequestException = error;
            } catch (final Exception error) {
                phrase = "RNTBD context request read failed";
                contextRequestException = error;
            } catch (final Throwable error) {
                phrase = "RNTBD context request read failed";
                contextRequestException = new ChannelException(error);
            }
        } else {
            phrase = "closed exceptionally";
        }

        final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count);
        final Exception cause;

        if (throwable instanceof ClosedChannelException) {
            // Prefer the context-negotiation failure, if any, over the bare closed-channel exception.
            cause = contextRequestException == null ? (ClosedChannelException) throwable : contextRequestException;
        } else {
            cause = throwable instanceof Exception ? (Exception) throwable : new ChannelException(throwable);
        }

        for (RntbdRequestRecord record : this.pendingRequests.values()) {
            final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders();
            final String requestUri = record.args().physicalAddressUri().getURI().toString();
            final GoneException error = new GoneException(message, cause, null, requestUri);
            BridgeInternal.setRequestHeaders(error, requestHeaders);
            record.completeExceptionally(error);
        }
    }

    /**
     * Removes the {@link RntbdContextNegotiator} from the pipeline once context negotiation has
     * completed, then writes and flushes any requests buffered while the handshake was in progress.
     *
     * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs
     */
    private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) {
        final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class);
        negotiator.removeInboundHandler();
        negotiator.removeOutboundHandler();
        if (!this.pendingWrites.isEmpty()) {
            this.pendingWrites.writeAndRemoveAll(context);
            context.flush();
        }
    }

    /** Reports an unexpected condition through the shared {@link RntbdReporter}. */
    private static void reportIssue(final Object subject, final String format, final Object... args) {
        RntbdReporter.reportIssue(logger, subject, format, args);
    }

    /** Reports an issue through the shared {@link RntbdReporter} when {@code predicate} is false. */
    private static void reportIssueUnless(
        final boolean predicate,
        final Object subject,
        final String format,
        final Object... args
    ) {
        RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args);
    }

    /** Emits a trace-level record of a channel operation and its arguments. */
    private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... args) {
        logger.trace("{}\n{}\n{}", operationName, context, args);
    }

    /**
     * Signals that a channel health check failed. Stack traces are suppressed because this exception is
     * used as a control-flow signal, not for diagnostics.
     */
    public final static class UnhealthyChannelException extends ChannelException {
        public UnhealthyChannelException(String reason) {
            super("health check failed, reason: " + reason);
        }
        @Override
        public Throwable fillInStackTrace() {
            return this;
        }
    }
}
class RntbdRequestManager implements ChannelHandler, ChannelInboundHandler, ChannelOutboundHandler { private static final ClosedChannelException ON_CHANNEL_UNREGISTERED = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "channelUnregistered"); private static final ClosedChannelException ON_CLOSE = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "close"); private static final ClosedChannelException ON_DEREGISTER = ThrowableUtil.unknownStackTrace(new ClosedChannelException(), RntbdRequestManager.class, "deregister"); private static final EventExecutor requestExpirationExecutor = new DefaultEventExecutor(new RntbdThreadFactory( "request-expirator", true, Thread.NORM_PRIORITY)); private static final Logger logger = LoggerFactory.getLogger(RntbdRequestManager.class); private final CompletableFuture<RntbdContext> contextFuture = new CompletableFuture<>(); private final CompletableFuture<RntbdContextRequest> contextRequestFuture = new CompletableFuture<>(); private final ChannelHealthChecker healthChecker; private final int pendingRequestLimit; private final ConcurrentHashMap<Long, RntbdRequestRecord> pendingRequests; private final Timestamps timestamps = new Timestamps(); private final RntbdConnectionStateListener rntbdConnectionStateListener; private final long idleConnectionTimerResolutionInNanos; private final long tcpNetworkRequestTimeoutInNanos; private final RntbdServerErrorInjector serverErrorInjector; private boolean closingExceptionally = false; private CoalescingBufferQueue pendingWrites; public RntbdRequestManager( final ChannelHealthChecker healthChecker, final int pendingRequestLimit, final RntbdConnectionStateListener connectionStateListener, final long idleConnectionTimerResolutionInNanos, final RntbdServerErrorInjector serverErrorInjector, final long tcpNetworkRequestTimeoutInNanos) { checkArgument(pendingRequestLimit > 0, "pendingRequestLimit: %s", pendingRequestLimit); 
checkNotNull(healthChecker, "healthChecker"); this.pendingRequests = new ConcurrentHashMap<>(pendingRequestLimit); this.pendingRequestLimit = pendingRequestLimit; this.healthChecker = healthChecker; this.rntbdConnectionStateListener = connectionStateListener; this.idleConnectionTimerResolutionInNanos = idleConnectionTimerResolutionInNanos; this.tcpNetworkRequestTimeoutInNanos = tcpNetworkRequestTimeoutInNanos; this.serverErrorInjector = serverErrorInjector; } /** * Gets called after the {@link ChannelHandler} was added to the actual context and it's ready to handle events. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerAdded(final ChannelHandlerContext context) { this.traceOperation(context, "handlerAdded"); } /** * Gets called after the {@link ChannelHandler} was removed from the actual context and it doesn't handle events * anymore. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void handlerRemoved(final ChannelHandlerContext context) { this.traceOperation(context, "handlerRemoved"); } /** * The {@link Channel} of the {@link ChannelHandlerContext} is now active * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelActive(final ChannelHandlerContext context) { this.traceOperation(context, "channelActive"); context.fireChannelActive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was registered and has reached the end of its lifetime * <p> * This method will only be called after the channel is closed. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelInactive(final ChannelHandlerContext context) { this.traceOperation(context, "channelInactive"); context.fireChannelInactive(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} has read a message from its peer. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs. * @param message The message read. */ @Override public void channelRead(final ChannelHandlerContext context, final Object message) { this.traceOperation(context, "channelRead"); this.timestamps.channelReadCompleted(); try { if (message.getClass() == RntbdResponse.class) { try { this.messageReceived(context, (RntbdResponse) message); } catch (CorruptedFrameException error) { this.exceptionCaught(context, error); } catch (Throwable throwable) { reportIssue(context, "{} ", message, throwable); this.exceptionCaught(context, throwable); } } else { final IllegalStateException error = new IllegalStateException( lenientFormat("expected message of %s, not %s: %s", RntbdResponse.class, message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } } finally { if (message instanceof ReferenceCounted) { boolean released = ((ReferenceCounted) message).release(); reportIssueUnless(released, context, "failed to release message: {}", message); } } } /** * The {@link Channel} of the {@link ChannelHandlerContext} has fully consumed the most-recent message read. * <p> * If {@link ChannelOption * {@link Channel} will be made until {@link ChannelHandlerContext * for outbound messages to be written. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelReadComplete(final ChannelHandlerContext context) { this.traceOperation(context, "channelReadComplete"); context.fireChannelReadComplete(); } /** * Constructs a {@link CoalescingBufferQueue} for buffering encoded requests until we have an {@link RntbdRequest} * <p> * This method then calls {@link ChannelHandlerContext * {@link ChannelInboundHandler} in the {@link ChannelPipeline}. * <p> * Sub-classes may override this method to change behavior. * * @param context the {@link ChannelHandlerContext} for which the bind operation is made */ @Override public void channelRegistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelRegistered"); reportIssueUnless(this.pendingWrites == null, context, "pendingWrites: {}", pendingWrites); this.pendingWrites = new CoalescingBufferQueue(context.channel()); context.fireChannelRegistered(); } /** * The {@link Channel} of the {@link ChannelHandlerContext} was unregistered from its {@link EventLoop} * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelUnregistered(final ChannelHandlerContext context) { this.traceOperation(context, "channelUnregistered"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CHANNEL_UNREGISTERED); } else { logger.debug("{} channelUnregistered exceptionally", context); } context.fireChannelUnregistered(); } /** * Gets called once the writable state of a {@link Channel} changed. 
You can check the state with * {@link Channel * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs */ @Override public void channelWritabilityChanged(final ChannelHandlerContext context) { this.traceOperation(context, "channelWritabilityChanged"); context.fireChannelWritabilityChanged(); } /** * Processes {@link ChannelHandlerContext * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param cause Exception caught */ @Override @SuppressWarnings("deprecation") public void exceptionCaught(final ChannelHandlerContext context, final Throwable cause) { this.traceOperation(context, "exceptionCaught", cause); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, cause); if (logger.isDebugEnabled()) { logger.debug("{} closing due to:", context, cause); } context.flush().close(); } } /** * Processes inbound events triggered by channel handlers in the {@link RntbdClientChannelHandler} pipeline * <p> * All but inbound request management events are ignored. 
* * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager} belongs * @param event An object representing a user event */ @Override public void userEventTriggered(final ChannelHandlerContext context, final Object event) { this.traceOperation(context, "userEventTriggered", event); try { if (event instanceof IdleStateEvent) { if (this.healthChecker instanceof RntbdClientChannelHealthChecker) { ((RntbdClientChannelHealthChecker) this.healthChecker) .isHealthyWithFailureReason(context.channel()).addListener((Future<String> future) -> { final Throwable cause; if (future.isSuccess()) { if (RntbdConstants.RntbdHealthCheckResults.SuccessValue.equals(future.get())) { return; } cause = new UnhealthyChannelException(future.get()); } else { cause = future.cause(); } this.exceptionCaught(context, cause); }); } else { this.healthChecker.isHealthy(context.channel()).addListener((Future<Boolean> future) -> { final Throwable cause; if (future.isSuccess()) { if (future.get()) { return; } cause = new UnhealthyChannelException( MessageFormat.format( "Custom ChannelHealthChecker {0} failed.", this.healthChecker.getClass().getSimpleName())); } else { cause = future.cause(); } this.exceptionCaught(context, cause); }); } return; } if (event instanceof RntbdContext) { this.contextFuture.complete((RntbdContext) event); this.removeContextNegotiatorAndFlushPendingWrites(context); this.timestamps.channelReadCompleted(); return; } if (event instanceof RntbdContextException) { this.contextFuture.completeExceptionally((RntbdContextException) event); this.exceptionCaught(context, (RntbdContextException)event); return; } if (event instanceof SslHandshakeCompletionEvent) { SslHandshakeCompletionEvent sslHandshakeCompletionEvent = (SslHandshakeCompletionEvent) event; if (sslHandshakeCompletionEvent.isSuccess()) { if (logger.isDebugEnabled()) { logger.debug("SslHandshake completed, adding idleStateHandler"); } context.pipeline().addAfter( SslHandler.class.toString(), 
IdleStateHandler.class.toString(), new IdleStateHandler( this.idleConnectionTimerResolutionInNanos, this.idleConnectionTimerResolutionInNanos, 0, TimeUnit.NANOSECONDS)); } else { if (logger.isDebugEnabled()) { logger.debug("SslHandshake failed", sslHandshakeCompletionEvent.cause()); } this.exceptionCaught(context, sslHandshakeCompletionEvent.cause()); return; } } if (event instanceof RntbdFaultInjectionConnectionResetEvent) { this.exceptionCaught(context, new IOException("Fault Injection Connection Reset")); return; } if (event instanceof RntbdFaultInjectionConnectionCloseEvent) { context.close(); return; } context.fireUserEventTriggered(event); } catch (Throwable error) { reportIssue(context, "{}: ", event, error); this.exceptionCaught(context, error); } } /** * Called once a bind operation is made. * * @param context the {@link ChannelHandlerContext} for which the bind operation is made * @param localAddress the {@link SocketAddress} to which it should bound * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void bind(final ChannelHandlerContext context, final SocketAddress localAddress, final ChannelPromise promise) { this.traceOperation(context, "bind", localAddress); context.bind(localAddress, promise); } /** * Called once a close operation is made. 
* * @param context the {@link ChannelHandlerContext} for which the close operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void close(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "close"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_CLOSE); } else { logger.debug("{} closed exceptionally", context); } final SslHandler sslHandler = context.pipeline().get(SslHandler.class); if (sslHandler != null) { try { sslHandler.closeOutbound(); } catch (Exception exception) { if (exception instanceof SSLException) { logger.debug( "SslException when attempting to close the outbound SSL connection: ", exception); } else { logger.warn( "Exception when attempting to close the outbound SSL connection: ", exception); throw exception; } } } context.close(promise); } /** * Called once a connect operation is made. * * @param context the {@link ChannelHandlerContext} for which the connect operation is made * @param remoteAddress the {@link SocketAddress} to which it should connect * @param localAddress the {@link SocketAddress} which is used as source on connect * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void connect( final ChannelHandlerContext context, final SocketAddress remoteAddress, final SocketAddress localAddress, final ChannelPromise promise ) { this.traceOperation(context, "connect", remoteAddress, localAddress); context.connect(remoteAddress, localAddress, promise); } /** * Called once a deregister operation is made from the current registered {@link EventLoop}. 
* * @param context the {@link ChannelHandlerContext} for which the deregister operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void deregister(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "deregister"); if (!this.closingExceptionally) { this.completeAllPendingRequestsExceptionally(context, ON_DEREGISTER); } else { logger.debug("{} deregistered exceptionally", context); } context.deregister(promise); } /** * Called once a disconnect operation is made. * * @param context the {@link ChannelHandlerContext} for which the disconnect operation is made * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void disconnect(final ChannelHandlerContext context, final ChannelPromise promise) { this.traceOperation(context, "disconnect"); context.disconnect(promise); } /** * Called once a flush operation is made * <p> * The flush operation will try to flush out all previous written messages that are pending. * * @param context the {@link ChannelHandlerContext} for which the flush operation is made */ @Override public void flush(final ChannelHandlerContext context) { this.traceOperation(context, "flush"); context.flush(); } /** * Intercepts {@link ChannelHandlerContext * * @param context the {@link ChannelHandlerContext} for which the read operation is made */ @Override public void read(final ChannelHandlerContext context) { this.traceOperation(context, "read"); context.read(); } /** * Called once a write operation is made * <p> * The write operation will send messages through the {@link ChannelPipeline} which are then ready to be flushed * to the actual {@link Channel}. 
This will occur when {@link Channel * * @param context the {@link ChannelHandlerContext} for which the write operation is made * @param message the message to write * @param promise the {@link ChannelPromise} to notify once the operation completes */ @Override public void write(final ChannelHandlerContext context, final Object message, final ChannelPromise promise) { this.traceOperation(context, "write", message); if (message instanceof RntbdRequestRecord) { final RntbdRequestRecord record = (RntbdRequestRecord) message; record.setTimestamps(this.timestamps); if (!record.isCancelled()) { record.setSendingRequestHasStarted(); this.timestamps.channelWriteAttempted(); if (this.serverErrorInjector != null) { if (this.serverErrorInjector.injectRntbdServerResponseError(record)) { return; } Consumer<Duration> writeRequestWithInjectedDelayConsumer = (delay) -> this.writeRequestWithInjectedDelay(context, record, promise, delay); if (this.serverErrorInjector.injectRntbdServerResponseDelay(record, writeRequestWithInjectedDelayConsumer)) { return; } } context.write(this.addPendingRequestRecord(context, record), promise).addListener(completed -> { record.stage(RntbdRequestRecord.Stage.SENT); if (completed.isSuccess()) { this.timestamps.channelWriteCompleted(); } }); } return; } if (message == RntbdHealthCheckRequest.MESSAGE) { context.write(RntbdHealthCheckRequest.MESSAGE, promise).addListener(completed -> { if (completed.isSuccess()) { this.timestamps.channelPingCompleted(); } }); return; } final IllegalStateException error = new IllegalStateException(lenientFormat("message of %s: %s", message.getClass(), message)); reportIssue(context, "", error); this.exceptionCaught(context, error); } private void writeRequestWithInjectedDelay( final ChannelHandlerContext context, final RntbdRequestRecord rntbdRequestRecord, final ChannelPromise promise, Duration delay) { this.addPendingRequestRecord(context, rntbdRequestRecord); rntbdRequestRecord.stage(RntbdRequestRecord.Stage.SENT); 
this.timestamps.channelWriteCompleted(); long effectiveDelayInNanos = Math.min(this.tcpNetworkRequestTimeoutInNanos, delay.toNanos()); context.executor().schedule( () -> { if (this.tcpNetworkRequestTimeoutInNanos <= delay.toNanos()) { return; } context.write(rntbdRequestRecord, promise); }, effectiveDelayInNanos, TimeUnit.NANOSECONDS ); } public RntbdChannelStatistics getChannelStatistics( Channel channel, RntbdChannelAcquisitionTimeline channelAcquisitionTimeline) { return new RntbdChannelStatistics() .channelId(channel.id().toString()) .pendingRequestsCount(this.pendingRequests.size()) .channelTaskQueueSize(RntbdUtils.tryGetExecutorTaskQueueSize(channel.eventLoop())) .lastReadTime(this.timestamps.lastChannelReadTime()) .transitTimeoutCount(this.timestamps.transitTimeoutCount()) .transitTimeoutStartingTime(this.timestamps.transitTimeoutStartingTime()) .waitForConnectionInit(channelAcquisitionTimeline.isWaitForChannelInit()); } int pendingRequestCount() { return this.pendingRequests.size(); } Optional<RntbdContext> rntbdContext() { return Optional.of(this.contextFuture.getNow(null)); } CompletableFuture<RntbdContextRequest> rntbdContextRequestFuture() { return this.contextRequestFuture; } boolean hasRequestedRntbdContext() { return this.contextRequestFuture.getNow(null) != null; } boolean hasRntbdContext() { return this.contextFuture.getNow(null) != null; } RntbdChannelState getChannelState(final int demand) { reportIssueUnless(this.hasRequestedRntbdContext(), this, "Direct TCP context request was not issued"); final int limit = this.hasRntbdContext() ? 
this.pendingRequestLimit : Math.min(this.pendingRequestLimit, demand); if (this.pendingRequests.size() < limit) { return RntbdChannelState.ok(this.pendingRequests.size()); } if (this.hasRntbdContext()) { return RntbdChannelState.pendingLimit(this.pendingRequests.size()); } else { return RntbdChannelState.contextNegotiationPending((this.pendingRequests.size())); } } void pendWrite(final ByteBuf out, final ChannelPromise promise) { this.pendingWrites.add(out, promise); } public Timestamps getTimestamps() { return this.timestamps; } Timestamps snapshotTimestamps() { return new Timestamps(this.timestamps); } private RntbdRequestRecord addPendingRequestRecord(final ChannelHandlerContext context, final RntbdRequestRecord record) { AtomicReference<Timeout> pendingRequestTimeout = new AtomicReference<>(); this.pendingRequests.compute(record.transportRequestId(), (id, current) -> { reportIssueUnless(current == null, context, "id: {}, current: {}, request: {}", record); pendingRequestTimeout.set(record.newTimeout(timeout -> { requestExpirationExecutor.execute(record::expire); })); return record; }); record.whenComplete((response, error) -> { this.pendingRequests.remove(record.transportRequestId()); if (pendingRequestTimeout.get() != null) { pendingRequestTimeout.get().cancel(); } }); return record; } private void completeAllPendingRequestsExceptionally( final ChannelHandlerContext context, final Throwable throwable) { reportIssueUnless(!this.closingExceptionally, context, "", throwable); this.closingExceptionally = true; if (this.pendingWrites != null && !this.pendingWrites.isEmpty()) { this.pendingWrites.releaseAndFailAll(context, throwable); } if (this.rntbdConnectionStateListener != null) { this.rntbdConnectionStateListener.onException(throwable); } if (this.pendingRequests.isEmpty()) { return; } if (!this.contextRequestFuture.isDone()) { this.contextRequestFuture.completeExceptionally(throwable); } if (!this.contextFuture.isDone()) { 
this.contextFuture.completeExceptionally(throwable); } final int count = this.pendingRequests.size(); Exception contextRequestException = null; String phrase = null; if (this.contextRequestFuture.isCompletedExceptionally()) { try { this.contextRequestFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request write cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request write failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request write failed"; contextRequestException = new ChannelException(error); } } else if (this.contextFuture.isCompletedExceptionally()) { try { this.contextFuture.get(); } catch (final CancellationException error) { phrase = "RNTBD context request read cancelled"; contextRequestException = error; } catch (final Exception error) { phrase = "RNTBD context request read failed"; contextRequestException = error; } catch (final Throwable error) { phrase = "RNTBD context request read failed"; contextRequestException = new ChannelException(error); } } else { phrase = "closed exceptionally"; } final String message = lenientFormat("%s %s with %s pending requests", context, phrase, count); final Exception cause; if (throwable instanceof ClosedChannelException) { cause = contextRequestException == null ? (ClosedChannelException) throwable : contextRequestException; } else { cause = throwable instanceof Exception ? 
(Exception) throwable : new ChannelException(throwable); } for (RntbdRequestRecord record : this.pendingRequests.values()) { final Map<String, String> requestHeaders = record.args().serviceRequest().getHeaders(); final String requestUri = record.args().physicalAddressUri().getURI().toString(); final GoneException error = new GoneException(message, cause, null, requestUri); BridgeInternal.setRequestHeaders(error, requestHeaders); record.completeExceptionally(error); } } /** * This method is called for each incoming message of type {@link RntbdResponse} to complete a request. * * @param context {@link ChannelHandlerContext} to which this {@link RntbdRequestManager request manager} belongs. * @param response the {@link RntbdResponse message} received. */ private void removeContextNegotiatorAndFlushPendingWrites(final ChannelHandlerContext context) { final RntbdContextNegotiator negotiator = context.pipeline().get(RntbdContextNegotiator.class); negotiator.removeInboundHandler(); negotiator.removeOutboundHandler(); if (!this.pendingWrites.isEmpty()) { this.pendingWrites.writeAndRemoveAll(context); context.flush(); } } private static void reportIssue(final Object subject, final String format, final Object... args) { RntbdReporter.reportIssue(logger, subject, format, args); } private static void reportIssueUnless( final boolean predicate, final Object subject, final String format, final Object... args ) { RntbdReporter.reportIssueUnless(logger, predicate, subject, format, args); } private void traceOperation(final ChannelHandlerContext context, final String operationName, final Object... args) { logger.trace("{}\n{}\n{}", operationName, context, args); } public final static class UnhealthyChannelException extends ChannelException { public UnhealthyChannelException(String reason) { super("health check failed, reason: " + reason); } @Override public Throwable fillInStackTrace() { return this; } } }
I think you are missing one %s Should start with %s {%s=%s, error...} if I understand this correctly?
public String toString() { try { ObjectNode exceptionMessageNode = mapper.createObjectNode(); exceptionMessageNode.put("ClassName", getClass().getSimpleName()); exceptionMessageNode.put(USER_AGENT_KEY, USER_AGENT); exceptionMessageNode.put("statusCode", statusCode); exceptionMessageNode.put("resourceAddress", resourceAddress); if (cosmosError != null) { exceptionMessageNode.put("error", cosmosError.toJson()); } exceptionMessageNode.put("innerErrorMessage", innerErrorMessage()); exceptionMessageNode.put("causeInfo", causeInfo()); if (responseHeaders != null) { exceptionMessageNode.put("responseHeaders", responseHeaders.toString()); } List<Map.Entry<String, String>> filterRequestHeaders = filterSensitiveData(requestHeaders); if (filterRequestHeaders != null) { exceptionMessageNode.put("requestHeaders", filterRequestHeaders.toString()); } if (StringUtils.isNotEmpty(this.faultInjectionRuleId)) { exceptionMessageNode.put("faultInjectionRuleId", this.faultInjectionRuleId); } if(this.cosmosDiagnostics != null) { cosmosDiagnostics.fillCosmosDiagnostics(exceptionMessageNode, null); } return mapper.writeValueAsString(exceptionMessageNode); } catch (JsonProcessingException ex) { return String.format( "%s {%s=, error=%s, resourceAddress=%s, statusCode=%s, message=%s, causeInfo=%s, responseHeaders=%s, requestHeaders=%s, faultInjectionRuleId=[%s] }", getClass().getSimpleName(), USER_AGENT_KEY, USER_AGENT, cosmosError, resourceAddress, statusCode, getMessage(), causeInfo(), responseHeaders, filterSensitiveData(requestHeaders), this.faultInjectionRuleId); } }
"%s {%s=, error=%s, resourceAddress=%s, statusCode=%s, message=%s, causeInfo=%s, responseHeaders=%s, requestHeaders=%s, faultInjectionRuleId=[%s] }",
public String toString() { try { ObjectNode exceptionMessageNode = mapper.createObjectNode(); exceptionMessageNode.put("ClassName", getClass().getSimpleName()); exceptionMessageNode.put(USER_AGENT_KEY, USER_AGENT); exceptionMessageNode.put("statusCode", statusCode); exceptionMessageNode.put("resourceAddress", resourceAddress); if (cosmosError != null) { exceptionMessageNode.put("error", cosmosError.toJson()); } exceptionMessageNode.put("innerErrorMessage", innerErrorMessage()); exceptionMessageNode.put("causeInfo", causeInfo()); if (responseHeaders != null) { exceptionMessageNode.put("responseHeaders", responseHeaders.toString()); } List<Map.Entry<String, String>> filterRequestHeaders = filterSensitiveData(requestHeaders); if (filterRequestHeaders != null) { exceptionMessageNode.put("requestHeaders", filterRequestHeaders.toString()); } if (StringUtils.isNotEmpty(this.faultInjectionRuleId)) { exceptionMessageNode.put("faultInjectionRuleId", this.faultInjectionRuleId); } if(this.cosmosDiagnostics != null) { cosmosDiagnostics.fillCosmosDiagnostics(exceptionMessageNode, null); } return mapper.writeValueAsString(exceptionMessageNode); } catch (JsonProcessingException ex) { return String.format( "%s {%s=%s, error=%s, resourceAddress=%s, statusCode=%s, message=%s, causeInfo=%s, responseHeaders=%s, requestHeaders=%s, faultInjectionRuleId=[%s] }", getClass().getSimpleName(), USER_AGENT_KEY, USER_AGENT, cosmosError, resourceAddress, statusCode, getMessage(), causeInfo(), responseHeaders, filterSensitiveData(requestHeaders), this.faultInjectionRuleId); } }
class CosmosException extends AzureException { private static final long MAX_RETRY_AFTER_IN_MS = BatchExecUtils.MAX_RETRY_AFTER_IN_MS; private static final long serialVersionUID = 1L; private static final ObjectMapper mapper = new ObjectMapper(); private final static String USER_AGENT = Utils.getUserAgent(); /** * Status code */ private final int statusCode; /** * Response headers */ private final Map<String, String> responseHeaders; /** * Cosmos diagnostics */ private CosmosDiagnostics cosmosDiagnostics; /** * Request timeline */ private RequestTimeline requestTimeline; /** * Channel acquisition timeline */ private RntbdChannelAcquisitionTimeline channelAcquisitionTimeline; /** * Cosmos error */ private CosmosError cosmosError; /** * RNTBD endpoint statistics */ private RntbdEndpointStatistics rntbdEndpointStatistics; /** * RNTBD endpoint statistics */ private RntbdChannelStatistics rntbdChannelStatistics; /** * LSN */ long lsn; /** * Partition key range ID */ String partitionKeyRangeId; /** * Request headers */ Map<String, String> requestHeaders; /** * Request URI */ Uri requestUri; /** * Resource address */ String resourceAddress; /** * Request payload length */ private int requestPayloadLength; /** * RNTBD request length */ private int rntbdRequestLength; /** * RNTBD response length */ private int rntbdResponseLength; /** * Sending request has started */ private boolean sendingRequestHasStarted; /*** * All selectable replica status. */ private final List<String> replicaStatusList = new ArrayList<>(); /** * Fault injection ruleId */ private String faultInjectionRuleId; /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param message the string message. * @param responseHeaders the response headers. 
* @param cause the inner exception */ protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) { super(message, cause); this.statusCode = statusCode; this.responseHeaders = new ConcurrentHashMap<>(); if (responseHeaders != null) { for (Map.Entry<String, String> entry: responseHeaders.entrySet()) { if (entry.getKey() != null && entry.getValue() != null) { this.responseHeaders.put(entry.getKey(), entry.getValue()); } } } } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param errorMessage the error message. */ protected CosmosException(int statusCode, String errorMessage) { this(statusCode, errorMessage, null, null); this.cosmosError = new CosmosError(); ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param innerException the original exception. */ protected CosmosException(int statusCode, Exception innerException) { this(statusCode, null, null, innerException); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. */ protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) { this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders); } /** * Creates a new instance of the CosmosException class. * * @param resourceAddress the address of the resource the request is associated with. * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. 
*/ protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) { this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null); this.resourceAddress = resourceAddress; this.cosmosError = cosmosErrorResource; } /** * Creates a new instance of the CosmosException class. * * @param resourceAddress the address of the resource the request is associated with. * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. * @param cause the inner exception */ protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders, Throwable cause) { this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, cause); this.resourceAddress = resourceAddress; this.cosmosError = cosmosErrorResource; } /** * Creates a new instance of the CosmosException class. * * @param message the string message. * @param statusCode the http status code of the response. * @param exception the exception object. * @param responseHeaders the response headers. * @param resourceAddress the address of the resource the request is associated with. 
*/ protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode, String resourceAddress) { this(statusCode, message, responseHeaders, exception); this.resourceAddress = resourceAddress; } @Override public String getMessage() { try { ObjectNode messageNode = mapper.createObjectNode(); messageNode.put("innerErrorMessage", innerErrorMessage()); if (cosmosDiagnostics != null) { cosmosDiagnostics.fillCosmosDiagnostics(messageNode, null); } return mapper.writeValueAsString(messageNode); } catch (JsonProcessingException e) { if (cosmosDiagnostics == null) { return innerErrorMessage(); } return innerErrorMessage() + ", " + cosmosDiagnostics.toString(); } } /** * Gets the activity ID associated with the request. * * @return the activity ID. */ public String getActivityId() { if (this.responseHeaders != null) { return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID); } return null; } /** * Gets the http status code. * * @return the status code. */ public int getStatusCode() { return this.statusCode; } /** * Gets the sub status code. * * @return the status code. */ public int getSubStatusCode() { int code = HttpConstants.SubStatusCodes.UNKNOWN; if (this.responseHeaders != null) { String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS); if (StringUtils.isNotEmpty(subStatusString)) { try { code = Integer.parseInt(subStatusString); } catch (NumberFormatException e) { } } } return code; } void setSubStatusCode(int subStatusCode) { this.responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, Integer.toString(subStatusCode)); } /** * Gets the error code associated with the exception. * * @return the error. 
*/ CosmosError getError() { return this.cosmosError; } void setError(CosmosError cosmosError) { this.cosmosError = cosmosError; } /** * Gets the recommended time duration after which the client can retry failed * requests * * @return the recommended time duration after which the client can retry failed * requests. */ public Duration getRetryAfterDuration() { long retryIntervalInMilliseconds = 0; if (this.responseHeaders != null) { String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS); if (StringUtils.isNotEmpty(header)) { try { retryIntervalInMilliseconds = Math.min(Long.parseLong(header), MAX_RETRY_AFTER_IN_MS); } catch (NumberFormatException e) { } } } return Duration.ofMillis(retryIntervalInMilliseconds); } /** * Gets the response headers as key-value pairs * * @return the response headers */ public Map<String, String> getResponseHeaders() { return this.responseHeaders; } /** * Gets the resource address associated with this exception. * * @return the resource address associated with this exception. */ String getResourceAddress() { return this.resourceAddress; } /** * Gets the Cosmos Diagnostic Statistics associated with this exception. * * @return Cosmos Diagnostic Statistics associated with this exception. */ public CosmosDiagnostics getDiagnostics() { return cosmosDiagnostics; } CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) { this.cosmosDiagnostics = cosmosDiagnostics; return this; } /** * Gets the request charge as request units (RU) consumed by the operation. * <p> * For more information about the RU and factors that can impact the effective charges please visit * <a href="https: * * @return the request charge. 
*/ public double getRequestCharge() { String value = this.getResponseHeaders().get(HttpConstants.HttpHeaders.REQUEST_CHARGE); if (StringUtils.isEmpty(value)) { return 0; } return Double.parseDouble(value); } @Override String innerErrorMessage() { String innerErrorMessage = super.getMessage(); if (cosmosError != null) { innerErrorMessage = cosmosError.getMessage(); if (innerErrorMessage == null) { innerErrorMessage = String.valueOf( ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors")); } } return innerErrorMessage; } private String causeInfo() { Throwable cause = getCause(); if (cause != null) { return String.format("[class: %s, message: %s]", cause.getClass(), cause.getMessage()); } return null; } private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) { if (requestHeaders == null) { return null; } return requestHeaders.entrySet().stream().filter(entry -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(entry.getKey())) .collect(Collectors.toList()); } RequestTimeline getRequestTimeline() { return this.requestTimeline; } void setRequestTimeline(RequestTimeline requestTimeline) { this.requestTimeline = requestTimeline; } RntbdChannelAcquisitionTimeline getChannelAcquisitionTimeline() { return this.channelAcquisitionTimeline; } void setChannelAcquisitionTimeline(RntbdChannelAcquisitionTimeline channelAcquisitionTimeline) { this.channelAcquisitionTimeline = channelAcquisitionTimeline; } void setResourceAddress(String resourceAddress) { this.resourceAddress = resourceAddress; } void setRntbdServiceEndpointStatistics(RntbdEndpointStatistics rntbdEndpointStatistics) { this.rntbdEndpointStatistics = rntbdEndpointStatistics; } RntbdEndpointStatistics getRntbdServiceEndpointStatistics() { return this.rntbdEndpointStatistics; } RntbdChannelStatistics getRntbdChannelStatistics() { return this.rntbdChannelStatistics; } void setRntbdChannelStatistics(RntbdChannelStatistics rntbdChannelStatistics) { 
this.rntbdChannelStatistics = rntbdChannelStatistics; } void setRntbdRequestLength(int rntbdRequestLength) { this.rntbdRequestLength = rntbdRequestLength; } int getRntbdRequestLength() { return this.rntbdRequestLength; } void setRntbdResponseLength(int rntbdResponseLength) { this.rntbdResponseLength = rntbdResponseLength; } int getRntbdResponseLength() { return this.rntbdResponseLength; } void setRequestPayloadLength(int requestBodyLength) { this.requestPayloadLength = requestBodyLength; } int getRequestPayloadLength() { return this.requestPayloadLength; } boolean hasSendingRequestStarted() { return this.sendingRequestHasStarted; } void setSendingRequestHasStarted(boolean hasSendingRequestStarted) { this.sendingRequestHasStarted = hasSendingRequestStarted; } void setFaultInjectionRuleId(String faultInjectionRUleId) { this.faultInjectionRuleId = faultInjectionRUleId; } String getFaultInjectionRuleId() { return this.faultInjectionRuleId; } List<String> getReplicaStatusList() { return this.replicaStatusList; } static void initialize() { ImplementationBridgeHelpers.CosmosExceptionHelper.setCosmosExceptionAccessor( new ImplementationBridgeHelpers.CosmosExceptionHelper.CosmosExceptionAccessor() { @Override public CosmosException createCosmosException(int statusCode, Exception innerException) { return new CosmosException(statusCode, innerException); } @Override public List<String> getReplicaStatusList(CosmosException cosmosException) { return cosmosException.getReplicaStatusList(); } @Override public CosmosException setRntbdChannelStatistics( CosmosException cosmosException, RntbdChannelStatistics rntbdChannelStatistics) { cosmosException.setRntbdChannelStatistics(rntbdChannelStatistics); return cosmosException; } @Override public RntbdChannelStatistics getRntbdChannelStatistics(CosmosException cosmosException) { return cosmosException.getRntbdChannelStatistics(); } @Override public void setFaultInjectionRuleId(CosmosException cosmosException, String faultInjectionRuleId) 
{ cosmosException.setFaultInjectionRuleId(faultInjectionRuleId); } @Override public String getFaultInjectionRuleId(CosmosException cosmosException) { return cosmosException.getFaultInjectionRuleId(); } }); } static { initialize(); } }
/**
 * Exception type for failed Cosmos DB requests. Carries the HTTP status code and
 * response headers of the failed operation together with client-side diagnostics
 * (request timeline, channel-acquisition timeline, RNTBD endpoint/channel statistics)
 * and an optional fault-injection rule id.
 *
 * NOTE(review): the package declaration and imports are outside this chunk; types such
 * as {@code AzureException}, {@code CosmosError}, {@code Uri} and the RNTBD statistics
 * classes are project-declared.
 */
class CosmosException extends AzureException {
    // Upper bound applied to the server-provided retry-after value (see getRetryAfterDuration).
    private static final long MAX_RETRY_AFTER_IN_MS = BatchExecUtils.MAX_RETRY_AFTER_IN_MS;
    private static final long serialVersionUID = 1L;
    // Shared Jackson mapper used to render getMessage() output as JSON; ObjectMapper is thread-safe.
    private static final ObjectMapper mapper = new ObjectMapper();
    private final static String USER_AGENT = Utils.getUserAgent();
    /**
     * HTTP status code of the failed response.
     */
    private final int statusCode;
    /**
     * Response headers (populated defensively; never null, see constructor).
     */
    private final Map<String, String> responseHeaders;
    /**
     * Cosmos diagnostics attached after the fact via setDiagnostics; may be null.
     */
    private CosmosDiagnostics cosmosDiagnostics;
    /**
     * Request timeline
     */
    private RequestTimeline requestTimeline;
    /**
     * Channel acquisition timeline
     */
    private RntbdChannelAcquisitionTimeline channelAcquisitionTimeline;
    /**
     * Cosmos error
     */
    private CosmosError cosmosError;
    /**
     * RNTBD endpoint statistics
     */
    private RntbdEndpointStatistics rntbdEndpointStatistics;
    /**
     * RNTBD channel statistics
     */
    private RntbdChannelStatistics rntbdChannelStatistics;
    /**
     * LSN
     */
    long lsn;
    /**
     * Partition key range ID
     */
    String partitionKeyRangeId;
    /**
     * Request headers
     */
    Map<String, String> requestHeaders;
    /**
     * Request URI
     */
    Uri requestUri;
    /**
     * Resource address
     */
    String resourceAddress;
    /**
     * Request payload length
     */
    private int requestPayloadLength;
    /**
     * RNTBD request length
     */
    private int rntbdRequestLength;
    /**
     * RNTBD response length
     */
    private int rntbdResponseLength;
    /**
     * Sending request has started
     */
    private boolean sendingRequestHasStarted;
    /**
     * All selectable replica status.
     */
    private final List<String> replicaStatusList = new ArrayList<>();
    /**
     * Fault injection ruleId
     */
    private String faultInjectionRuleId;

    /**
     * Creates a new instance of the CosmosException class.
     * This is the root constructor the other constructors delegate to.
     *
     * @param statusCode the http status code of the response.
     * @param message the string message.
     * @param responseHeaders the response headers.
     * @param cause the inner exception
     */
    protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) {
        super(message, cause);
        this.statusCode = statusCode;
        // Copy into a ConcurrentHashMap, dropping null keys/values which it cannot hold.
        this.responseHeaders = new ConcurrentHashMap<>();
        if (responseHeaders != null) {
            for (Map.Entry<String, String> entry: responseHeaders.entrySet()) {
                if (entry.getKey() != null && entry.getValue() != null) {
                    this.responseHeaders.put(entry.getKey(), entry.getValue());
                }
            }
        }
    }

    /**
     * Creates a new instance of the CosmosException class.
     *
     * @param statusCode the http status code of the response.
     * @param errorMessage the error message.
     */
    protected CosmosException(int statusCode, String errorMessage) {
        this(statusCode, errorMessage, null, null);
        this.cosmosError = new CosmosError();
        ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage);
    }

    /**
     * Creates a new instance of the CosmosException class.
     *
     * @param statusCode the http status code of the response.
     * @param innerException the original exception.
     */
    protected CosmosException(int statusCode, Exception innerException) {
        this(statusCode, null, null, innerException);
    }

    /**
     * Creates a new instance of the CosmosException class.
     *
     * @param statusCode the http status code of the response.
     * @param cosmosErrorResource the error resource object.
     * @param responseHeaders the response headers.
     */
    protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) {
        this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders);
    }

    /**
     * Creates a new instance of the CosmosException class.
     *
     * @param resourceAddress the address of the resource the request is associated with.
     * @param statusCode the http status code of the response.
     * @param cosmosErrorResource the error resource object.
     * @param responseHeaders the response headers.
     */
    protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) {
        this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null);
        this.resourceAddress = resourceAddress;
        this.cosmosError = cosmosErrorResource;
    }

    /**
     * Creates a new instance of the CosmosException class.
     *
     * @param resourceAddress the address of the resource the request is associated with.
     * @param statusCode the http status code of the response.
     * @param cosmosErrorResource the error resource object.
     * @param responseHeaders the response headers.
     * @param cause the inner exception
     */
    protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders, Throwable cause) {
        this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, cause);
        this.resourceAddress = resourceAddress;
        this.cosmosError = cosmosErrorResource;
    }

    /**
     * Creates a new instance of the CosmosException class.
     *
     * @param message the string message.
     * @param exception the exception object.
     * @param responseHeaders the response headers.
     * @param statusCode the http status code of the response.
     * @param resourceAddress the address of the resource the request is associated with.
     */
    protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode, String resourceAddress) {
        this(statusCode, message, responseHeaders, exception);
        this.resourceAddress = resourceAddress;
    }

    /**
     * Renders the inner error message (plus diagnostics when present) as a JSON object.
     * Falls back to plain string concatenation if JSON serialization fails.
     */
    @Override
    public String getMessage() {
        try {
            ObjectNode messageNode = mapper.createObjectNode();
            messageNode.put("innerErrorMessage", innerErrorMessage());
            if (cosmosDiagnostics != null) {
                cosmosDiagnostics.fillCosmosDiagnostics(messageNode, null);
            }
            return mapper.writeValueAsString(messageNode);
        } catch (JsonProcessingException e) {
            // Best-effort fallback: plain-text message without JSON framing.
            if (cosmosDiagnostics == null) {
                return innerErrorMessage();
            }
            return innerErrorMessage() + ", " + cosmosDiagnostics.toString();
        }
    }

    /**
     * Gets the activity ID associated with the request.
     *
     * @return the activity ID, or null when the header is absent.
     */
    public String getActivityId() {
        if (this.responseHeaders != null) {
            return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID);
        }
        return null;
    }

    /**
     * Gets the http status code.
     *
     * @return the status code.
     */
    public int getStatusCode() {
        return this.statusCode;
    }

    /**
     * Gets the sub status code parsed from the sub-status response header.
     *
     * @return the sub status code, or SubStatusCodes.UNKNOWN when absent/unparseable.
     */
    public int getSubStatusCode() {
        int code = HttpConstants.SubStatusCodes.UNKNOWN;
        if (this.responseHeaders != null) {
            String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS);
            if (StringUtils.isNotEmpty(subStatusString)) {
                try {
                    code = Integer.parseInt(subStatusString);
                } catch (NumberFormatException e) {
                    // Deliberately ignored: an unparseable header falls back to UNKNOWN.
                }
            }
        }
        return code;
    }

    // Overwrites the sub-status header; package-private, used by internal callers.
    void setSubStatusCode(int subStatusCode) {
        this.responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, Integer.toString(subStatusCode));
    }

    /**
     * Gets the error code associated with the exception.
     *
     * @return the error.
     */
    CosmosError getError() {
        return this.cosmosError;
    }

    void setError(CosmosError cosmosError) {
        this.cosmosError = cosmosError;
    }

    /**
     * Gets the recommended time duration after which the client can retry failed
     * requests, capped at MAX_RETRY_AFTER_IN_MS.
     *
     * @return the recommended time duration after which the client can retry failed
     * requests (zero when the header is absent or unparseable).
     */
    public Duration getRetryAfterDuration() {
        long retryIntervalInMilliseconds = 0;
        if (this.responseHeaders != null) {
            String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS);
            if (StringUtils.isNotEmpty(header)) {
                try {
                    retryIntervalInMilliseconds = Math.min(Long.parseLong(header), MAX_RETRY_AFTER_IN_MS);
                } catch (NumberFormatException e) {
                    // Deliberately ignored: an unparseable header yields a zero retry interval.
                }
            }
        }
        return Duration.ofMillis(retryIntervalInMilliseconds);
    }

    /**
     * Gets the response headers as key-value pairs
     *
     * @return the response headers
     */
    public Map<String, String> getResponseHeaders() {
        return this.responseHeaders;
    }

    /**
     * Gets the resource address associated with this exception.
     *
     * @return the resource address associated with this exception.
     */
    String getResourceAddress() {
        return this.resourceAddress;
    }

    /**
     * Gets the Cosmos Diagnostic Statistics associated with this exception.
     *
     * @return Cosmos Diagnostic Statistics associated with this exception.
     */
    public CosmosDiagnostics getDiagnostics() {
        return cosmosDiagnostics;
    }

    CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) {
        this.cosmosDiagnostics = cosmosDiagnostics;
        return this;
    }

    /**
     * Gets the request charge as request units (RU) consumed by the operation.
     * <p>
     * For more information about the RU and factors that can impact the effective
     * charges please visit the Azure Cosmos DB request-units documentation.
     *
     * @return the request charge (0 when the header is absent).
     */
    public double getRequestCharge() {
        String value = this.getResponseHeaders().get(HttpConstants.HttpHeaders.REQUEST_CHARGE);
        if (StringUtils.isEmpty(value)) {
            return 0;
        }
        return Double.parseDouble(value);
    }

    // Prefers the CosmosError message; falls back to its "Errors" property, then to super.getMessage().
    @Override
    String innerErrorMessage() {
        String innerErrorMessage = super.getMessage();
        if (cosmosError != null) {
            innerErrorMessage = cosmosError.getMessage();
            if (innerErrorMessage == null) {
                innerErrorMessage = String.valueOf(
                    ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors"));
            }
        }
        return innerErrorMessage;
    }

    // Formats the cause (class + message) for diagnostics output; null when there is no cause.
    private String causeInfo() {
        Throwable cause = getCause();
        if (cause != null) {
            return String.format("[class: %s, message: %s]", cause.getClass(), cause.getMessage());
        }
        return null;
    }

    // Drops the Authorization header so request headers can be logged without leaking credentials.
    private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) {
        if (requestHeaders == null) {
            return null;
        }
        return requestHeaders.entrySet().stream().filter(entry -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(entry.getKey()))
            .collect(Collectors.toList());
    }

    RequestTimeline getRequestTimeline() {
        return this.requestTimeline;
    }

    void setRequestTimeline(RequestTimeline requestTimeline) {
        this.requestTimeline = requestTimeline;
    }

    RntbdChannelAcquisitionTimeline getChannelAcquisitionTimeline() {
        return this.channelAcquisitionTimeline;
    }

    void setChannelAcquisitionTimeline(RntbdChannelAcquisitionTimeline channelAcquisitionTimeline) {
        this.channelAcquisitionTimeline = channelAcquisitionTimeline;
    }

    void setResourceAddress(String resourceAddress) {
        this.resourceAddress = resourceAddress;
    }

    void setRntbdServiceEndpointStatistics(RntbdEndpointStatistics rntbdEndpointStatistics) {
        this.rntbdEndpointStatistics = rntbdEndpointStatistics;
    }

    RntbdEndpointStatistics getRntbdServiceEndpointStatistics() {
        return this.rntbdEndpointStatistics;
    }

    RntbdChannelStatistics getRntbdChannelStatistics() {
        return this.rntbdChannelStatistics;
    }

    void setRntbdChannelStatistics(RntbdChannelStatistics rntbdChannelStatistics) {
        this.rntbdChannelStatistics = rntbdChannelStatistics;
    }

    void setRntbdRequestLength(int rntbdRequestLength) {
        this.rntbdRequestLength = rntbdRequestLength;
    }

    int getRntbdRequestLength() {
        return this.rntbdRequestLength;
    }

    void setRntbdResponseLength(int rntbdResponseLength) {
        this.rntbdResponseLength = rntbdResponseLength;
    }

    int getRntbdResponseLength() {
        return this.rntbdResponseLength;
    }

    void setRequestPayloadLength(int requestBodyLength) {
        this.requestPayloadLength = requestBodyLength;
    }

    int getRequestPayloadLength() {
        return this.requestPayloadLength;
    }

    boolean hasSendingRequestStarted() {
        return this.sendingRequestHasStarted;
    }

    void setSendingRequestHasStarted(boolean hasSendingRequestStarted) {
        this.sendingRequestHasStarted = hasSendingRequestStarted;
    }

    void setFaultInjectionRuleId(String faultInjectionRUleId) {
        this.faultInjectionRuleId = faultInjectionRUleId;
    }

    String getFaultInjectionRuleId() {
        return this.faultInjectionRuleId;
    }

    List<String> getReplicaStatusList() {
        return this.replicaStatusList;
    }

    // Registers the cross-package accessor bridge so internal packages can reach
    // package-private members of this class without reflection.
    static void initialize() {
        ImplementationBridgeHelpers.CosmosExceptionHelper.setCosmosExceptionAccessor(
            new ImplementationBridgeHelpers.CosmosExceptionHelper.CosmosExceptionAccessor() {
                @Override
                public CosmosException createCosmosException(int statusCode, Exception innerException) {
                    return new CosmosException(statusCode, innerException);
                }

                @Override
                public List<String> getReplicaStatusList(CosmosException cosmosException) {
                    return cosmosException.getReplicaStatusList();
                }

                @Override
                public CosmosException setRntbdChannelStatistics(
                    CosmosException cosmosException,
                    RntbdChannelStatistics rntbdChannelStatistics) {
                    cosmosException.setRntbdChannelStatistics(rntbdChannelStatistics);
                    return cosmosException;
                }

                @Override
                public RntbdChannelStatistics getRntbdChannelStatistics(CosmosException cosmosException) {
                    return cosmosException.getRntbdChannelStatistics();
                }

                @Override
                public void setFaultInjectionRuleId(CosmosException cosmosException, String faultInjectionRuleId) {
                    cosmosException.setFaultInjectionRuleId(faultInjectionRuleId);
                }

                @Override
                public String getFaultInjectionRuleId(CosmosException cosmosException) {
                    return cosmosException.getFaultInjectionRuleId();
                }
            });
    }

    // Ensure the accessor bridge is registered as soon as the class is loaded.
    static { initialize(); }
}
You are absolutely correct; updated.
/**
 * Serializes this exception (class name, user agent, status code, resource address,
 * error, messages, headers, fault-injection rule id and diagnostics) as a JSON string.
 * Request headers are passed through filterSensitiveData so the Authorization header
 * is never emitted.
 *
 * Fix: the fallback format string used when JSON serialization fails had only ten
 * placeholders for eleven arguments — "{%s=, ...}" dropped the USER_AGENT value
 * entirely (String.format silently ignores surplus arguments), producing e.g.
 * "userAgent=," with no value. Changed "%s=," to "%s=%s," so the user agent is
 * actually included in the fallback output.
 *
 * @return a JSON representation of this exception, or a formatted plain string if
 * JSON serialization fails.
 */
public String toString() {
    try {
        ObjectNode exceptionMessageNode = mapper.createObjectNode();
        exceptionMessageNode.put("ClassName", getClass().getSimpleName());
        exceptionMessageNode.put(USER_AGENT_KEY, USER_AGENT);
        exceptionMessageNode.put("statusCode", statusCode);
        exceptionMessageNode.put("resourceAddress", resourceAddress);
        if (cosmosError != null) {
            exceptionMessageNode.put("error", cosmosError.toJson());
        }
        exceptionMessageNode.put("innerErrorMessage", innerErrorMessage());
        exceptionMessageNode.put("causeInfo", causeInfo());
        if (responseHeaders != null) {
            exceptionMessageNode.put("responseHeaders", responseHeaders.toString());
        }
        // Strip credentials before logging request headers.
        List<Map.Entry<String, String>> filterRequestHeaders = filterSensitiveData(requestHeaders);
        if (filterRequestHeaders != null) {
            exceptionMessageNode.put("requestHeaders", filterRequestHeaders.toString());
        }
        if (StringUtils.isNotEmpty(this.faultInjectionRuleId)) {
            exceptionMessageNode.put("faultInjectionRuleId", this.faultInjectionRuleId);
        }
        if (this.cosmosDiagnostics != null) {
            cosmosDiagnostics.fillCosmosDiagnostics(exceptionMessageNode, null);
        }
        return mapper.writeValueAsString(exceptionMessageNode);
    } catch (JsonProcessingException ex) {
        // Best-effort plain-text fallback; was missing the %s for USER_AGENT.
        return String.format(
            "%s {%s=%s, error=%s, resourceAddress=%s, statusCode=%s, message=%s, causeInfo=%s, responseHeaders=%s, requestHeaders=%s, faultInjectionRuleId=[%s] }",
            getClass().getSimpleName(),
            USER_AGENT_KEY,
            USER_AGENT,
            cosmosError,
            resourceAddress,
            statusCode,
            getMessage(),
            causeInfo(),
            responseHeaders,
            filterSensitiveData(requestHeaders),
            this.faultInjectionRuleId);
    }
}
"%s {%s=, error=%s, resourceAddress=%s, statusCode=%s, message=%s, causeInfo=%s, responseHeaders=%s, requestHeaders=%s, faultInjectionRuleId=[%s] }",
/**
 * Serializes this exception (class name, user agent, status code, resource address,
 * error, messages, headers, fault-injection rule id and diagnostics) as a JSON string.
 * Request headers are passed through filterSensitiveData so the Authorization header
 * is never emitted. Falls back to a String.format rendering of the same fields when
 * JSON serialization fails.
 *
 * @return a JSON representation of this exception, or a formatted plain string if
 * JSON serialization fails.
 */
public String toString() {
    try {
        ObjectNode exceptionMessageNode = mapper.createObjectNode();
        exceptionMessageNode.put("ClassName", getClass().getSimpleName());
        exceptionMessageNode.put(USER_AGENT_KEY, USER_AGENT);
        exceptionMessageNode.put("statusCode", statusCode);
        exceptionMessageNode.put("resourceAddress", resourceAddress);
        if (cosmosError != null) {
            exceptionMessageNode.put("error", cosmosError.toJson());
        }
        exceptionMessageNode.put("innerErrorMessage", innerErrorMessage());
        exceptionMessageNode.put("causeInfo", causeInfo());
        if (responseHeaders != null) {
            exceptionMessageNode.put("responseHeaders", responseHeaders.toString());
        }
        // Strip credentials before logging request headers.
        List<Map.Entry<String, String>> filterRequestHeaders = filterSensitiveData(requestHeaders);
        if (filterRequestHeaders != null) {
            exceptionMessageNode.put("requestHeaders", filterRequestHeaders.toString());
        }
        if (StringUtils.isNotEmpty(this.faultInjectionRuleId)) {
            exceptionMessageNode.put("faultInjectionRuleId", this.faultInjectionRuleId);
        }
        if(this.cosmosDiagnostics != null) {
            cosmosDiagnostics.fillCosmosDiagnostics(exceptionMessageNode, null);
        }
        return mapper.writeValueAsString(exceptionMessageNode);
    } catch (JsonProcessingException ex) {
        // Best-effort plain-text fallback when Jackson serialization fails.
        return String.format(
            "%s {%s=%s, error=%s, resourceAddress=%s, statusCode=%s, message=%s, causeInfo=%s, responseHeaders=%s, requestHeaders=%s, faultInjectionRuleId=[%s] }",
            getClass().getSimpleName(),
            USER_AGENT_KEY,
            USER_AGENT,
            cosmosError,
            resourceAddress,
            statusCode,
            getMessage(),
            causeInfo(),
            responseHeaders,
            filterSensitiveData(requestHeaders),
            this.faultInjectionRuleId);
    }
}
/**
 * Exception raised for failed Cosmos DB requests. Carries the HTTP status code and
 * response headers of the failed operation plus client-side diagnostics (request
 * timeline, channel-acquisition timeline, RNTBD endpoint/channel statistics) and an
 * optional fault-injection rule id.
 *
 * NOTE(review): the package declaration and imports are outside this chunk; types such
 * as {@code AzureException}, {@code CosmosError}, {@code Uri} and the RNTBD statistics
 * classes are project-declared.
 */
class CosmosException extends AzureException {
    // Upper bound applied to the server-provided retry-after value (see getRetryAfterDuration).
    private static final long MAX_RETRY_AFTER_IN_MS = BatchExecUtils.MAX_RETRY_AFTER_IN_MS;
    private static final long serialVersionUID = 1L;
    // Shared Jackson mapper used to render getMessage() output as JSON; ObjectMapper is thread-safe.
    private static final ObjectMapper mapper = new ObjectMapper();
    private final static String USER_AGENT = Utils.getUserAgent();
    /**
     * HTTP status code of the failed response.
     */
    private final int statusCode;
    /**
     * Response headers (populated defensively; never null, see constructor).
     */
    private final Map<String, String> responseHeaders;
    /**
     * Cosmos diagnostics attached after the fact via setDiagnostics; may be null.
     */
    private CosmosDiagnostics cosmosDiagnostics;
    /**
     * Request timeline
     */
    private RequestTimeline requestTimeline;
    /**
     * Channel acquisition timeline
     */
    private RntbdChannelAcquisitionTimeline channelAcquisitionTimeline;
    /**
     * Cosmos error
     */
    private CosmosError cosmosError;
    /**
     * RNTBD endpoint statistics
     */
    private RntbdEndpointStatistics rntbdEndpointStatistics;
    /**
     * RNTBD channel statistics
     */
    private RntbdChannelStatistics rntbdChannelStatistics;
    /**
     * LSN
     */
    long lsn;
    /**
     * Partition key range ID
     */
    String partitionKeyRangeId;
    /**
     * Request headers
     */
    Map<String, String> requestHeaders;
    /**
     * Request URI
     */
    Uri requestUri;
    /**
     * Resource address
     */
    String resourceAddress;
    /**
     * Request payload length
     */
    private int requestPayloadLength;
    /**
     * RNTBD request length
     */
    private int rntbdRequestLength;
    /**
     * RNTBD response length
     */
    private int rntbdResponseLength;
    /**
     * Sending request has started
     */
    private boolean sendingRequestHasStarted;
    /**
     * All selectable replica status.
     */
    private final List<String> replicaStatusList = new ArrayList<>();
    /**
     * Fault injection ruleId
     */
    private String faultInjectionRuleId;

    /**
     * Creates a new instance of the CosmosException class.
     * This is the root constructor the other constructors delegate to.
     *
     * @param statusCode the http status code of the response.
     * @param message the string message.
     * @param responseHeaders the response headers.
     * @param cause the inner exception
     */
    protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) {
        super(message, cause);
        this.statusCode = statusCode;
        // Copy into a ConcurrentHashMap, dropping null keys/values which it cannot hold.
        this.responseHeaders = new ConcurrentHashMap<>();
        if (responseHeaders != null) {
            for (Map.Entry<String, String> entry: responseHeaders.entrySet()) {
                if (entry.getKey() != null && entry.getValue() != null) {
                    this.responseHeaders.put(entry.getKey(), entry.getValue());
                }
            }
        }
    }

    /**
     * Creates a new instance of the CosmosException class.
     *
     * @param statusCode the http status code of the response.
     * @param errorMessage the error message.
     */
    protected CosmosException(int statusCode, String errorMessage) {
        this(statusCode, errorMessage, null, null);
        this.cosmosError = new CosmosError();
        ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage);
    }

    /**
     * Creates a new instance of the CosmosException class.
     *
     * @param statusCode the http status code of the response.
     * @param innerException the original exception.
     */
    protected CosmosException(int statusCode, Exception innerException) {
        this(statusCode, null, null, innerException);
    }

    /**
     * Creates a new instance of the CosmosException class.
     *
     * @param statusCode the http status code of the response.
     * @param cosmosErrorResource the error resource object.
     * @param responseHeaders the response headers.
     */
    protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) {
        this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders);
    }

    /**
     * Creates a new instance of the CosmosException class.
     *
     * @param resourceAddress the address of the resource the request is associated with.
     * @param statusCode the http status code of the response.
     * @param cosmosErrorResource the error resource object.
     * @param responseHeaders the response headers.
     */
    protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) {
        this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null);
        this.resourceAddress = resourceAddress;
        this.cosmosError = cosmosErrorResource;
    }

    /**
     * Creates a new instance of the CosmosException class.
     *
     * @param resourceAddress the address of the resource the request is associated with.
     * @param statusCode the http status code of the response.
     * @param cosmosErrorResource the error resource object.
     * @param responseHeaders the response headers.
     * @param cause the inner exception
     */
    protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders, Throwable cause) {
        this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, cause);
        this.resourceAddress = resourceAddress;
        this.cosmosError = cosmosErrorResource;
    }

    /**
     * Creates a new instance of the CosmosException class.
     *
     * @param message the string message.
     * @param exception the exception object.
     * @param responseHeaders the response headers.
     * @param statusCode the http status code of the response.
     * @param resourceAddress the address of the resource the request is associated with.
     */
    protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode, String resourceAddress) {
        this(statusCode, message, responseHeaders, exception);
        this.resourceAddress = resourceAddress;
    }

    /**
     * Renders the inner error message (plus diagnostics when present) as a JSON object.
     * Falls back to plain string concatenation if JSON serialization fails.
     */
    @Override
    public String getMessage() {
        try {
            ObjectNode messageNode = mapper.createObjectNode();
            messageNode.put("innerErrorMessage", innerErrorMessage());
            if (cosmosDiagnostics != null) {
                cosmosDiagnostics.fillCosmosDiagnostics(messageNode, null);
            }
            return mapper.writeValueAsString(messageNode);
        } catch (JsonProcessingException e) {
            // Best-effort fallback: plain-text message without JSON framing.
            if (cosmosDiagnostics == null) {
                return innerErrorMessage();
            }
            return innerErrorMessage() + ", " + cosmosDiagnostics.toString();
        }
    }

    /**
     * Gets the activity ID associated with the request.
     *
     * @return the activity ID, or null when the header is absent.
     */
    public String getActivityId() {
        if (this.responseHeaders != null) {
            return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID);
        }
        return null;
    }

    /**
     * Gets the http status code.
     *
     * @return the status code.
     */
    public int getStatusCode() {
        return this.statusCode;
    }

    /**
     * Gets the sub status code parsed from the sub-status response header.
     *
     * @return the sub status code, or SubStatusCodes.UNKNOWN when absent/unparseable.
     */
    public int getSubStatusCode() {
        int code = HttpConstants.SubStatusCodes.UNKNOWN;
        if (this.responseHeaders != null) {
            String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS);
            if (StringUtils.isNotEmpty(subStatusString)) {
                try {
                    code = Integer.parseInt(subStatusString);
                } catch (NumberFormatException e) {
                    // Deliberately ignored: an unparseable header falls back to UNKNOWN.
                }
            }
        }
        return code;
    }

    // Overwrites the sub-status header; package-private, used by internal callers.
    void setSubStatusCode(int subStatusCode) {
        this.responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, Integer.toString(subStatusCode));
    }

    /**
     * Gets the error code associated with the exception.
     *
     * @return the error.
     */
    CosmosError getError() {
        return this.cosmosError;
    }

    void setError(CosmosError cosmosError) {
        this.cosmosError = cosmosError;
    }

    /**
     * Gets the recommended time duration after which the client can retry failed
     * requests, capped at MAX_RETRY_AFTER_IN_MS.
     *
     * @return the recommended time duration after which the client can retry failed
     * requests (zero when the header is absent or unparseable).
     */
    public Duration getRetryAfterDuration() {
        long retryIntervalInMilliseconds = 0;
        if (this.responseHeaders != null) {
            String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS);
            if (StringUtils.isNotEmpty(header)) {
                try {
                    retryIntervalInMilliseconds = Math.min(Long.parseLong(header), MAX_RETRY_AFTER_IN_MS);
                } catch (NumberFormatException e) {
                    // Deliberately ignored: an unparseable header yields a zero retry interval.
                }
            }
        }
        return Duration.ofMillis(retryIntervalInMilliseconds);
    }

    /**
     * Gets the response headers as key-value pairs
     *
     * @return the response headers
     */
    public Map<String, String> getResponseHeaders() {
        return this.responseHeaders;
    }

    /**
     * Gets the resource address associated with this exception.
     *
     * @return the resource address associated with this exception.
     */
    String getResourceAddress() {
        return this.resourceAddress;
    }

    /**
     * Gets the Cosmos Diagnostic Statistics associated with this exception.
     *
     * @return Cosmos Diagnostic Statistics associated with this exception.
     */
    public CosmosDiagnostics getDiagnostics() {
        return cosmosDiagnostics;
    }

    CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) {
        this.cosmosDiagnostics = cosmosDiagnostics;
        return this;
    }

    /**
     * Gets the request charge as request units (RU) consumed by the operation.
     * <p>
     * For more information about the RU and factors that can impact the effective
     * charges please visit the Azure Cosmos DB request-units documentation.
     *
     * @return the request charge (0 when the header is absent).
     */
    public double getRequestCharge() {
        String value = this.getResponseHeaders().get(HttpConstants.HttpHeaders.REQUEST_CHARGE);
        if (StringUtils.isEmpty(value)) {
            return 0;
        }
        return Double.parseDouble(value);
    }

    // Prefers the CosmosError message; falls back to its "Errors" property, then to super.getMessage().
    @Override
    String innerErrorMessage() {
        String innerErrorMessage = super.getMessage();
        if (cosmosError != null) {
            innerErrorMessage = cosmosError.getMessage();
            if (innerErrorMessage == null) {
                innerErrorMessage = String.valueOf(
                    ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors"));
            }
        }
        return innerErrorMessage;
    }

    // Formats the cause (class + message) for diagnostics output; null when there is no cause.
    private String causeInfo() {
        Throwable cause = getCause();
        if (cause != null) {
            return String.format("[class: %s, message: %s]", cause.getClass(), cause.getMessage());
        }
        return null;
    }

    // Drops the Authorization header so request headers can be logged without leaking credentials.
    private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) {
        if (requestHeaders == null) {
            return null;
        }
        return requestHeaders.entrySet().stream().filter(entry -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(entry.getKey()))
            .collect(Collectors.toList());
    }

    RequestTimeline getRequestTimeline() {
        return this.requestTimeline;
    }

    void setRequestTimeline(RequestTimeline requestTimeline) {
        this.requestTimeline = requestTimeline;
    }

    RntbdChannelAcquisitionTimeline getChannelAcquisitionTimeline() {
        return this.channelAcquisitionTimeline;
    }

    void setChannelAcquisitionTimeline(RntbdChannelAcquisitionTimeline channelAcquisitionTimeline) {
        this.channelAcquisitionTimeline = channelAcquisitionTimeline;
    }

    void setResourceAddress(String resourceAddress) {
        this.resourceAddress = resourceAddress;
    }

    void setRntbdServiceEndpointStatistics(RntbdEndpointStatistics rntbdEndpointStatistics) {
        this.rntbdEndpointStatistics = rntbdEndpointStatistics;
    }

    RntbdEndpointStatistics getRntbdServiceEndpointStatistics() {
        return this.rntbdEndpointStatistics;
    }

    RntbdChannelStatistics getRntbdChannelStatistics() {
        return this.rntbdChannelStatistics;
    }

    void setRntbdChannelStatistics(RntbdChannelStatistics rntbdChannelStatistics) {
        this.rntbdChannelStatistics = rntbdChannelStatistics;
    }

    void setRntbdRequestLength(int rntbdRequestLength) {
        this.rntbdRequestLength = rntbdRequestLength;
    }

    int getRntbdRequestLength() {
        return this.rntbdRequestLength;
    }

    void setRntbdResponseLength(int rntbdResponseLength) {
        this.rntbdResponseLength = rntbdResponseLength;
    }

    int getRntbdResponseLength() {
        return this.rntbdResponseLength;
    }

    void setRequestPayloadLength(int requestBodyLength) {
        this.requestPayloadLength = requestBodyLength;
    }

    int getRequestPayloadLength() {
        return this.requestPayloadLength;
    }

    boolean hasSendingRequestStarted() {
        return this.sendingRequestHasStarted;
    }

    void setSendingRequestHasStarted(boolean hasSendingRequestStarted) {
        this.sendingRequestHasStarted = hasSendingRequestStarted;
    }

    void setFaultInjectionRuleId(String faultInjectionRUleId) {
        this.faultInjectionRuleId = faultInjectionRUleId;
    }

    String getFaultInjectionRuleId() {
        return this.faultInjectionRuleId;
    }

    List<String> getReplicaStatusList() {
        return this.replicaStatusList;
    }

    // Registers the cross-package accessor bridge so internal packages can reach
    // package-private members of this class without reflection.
    static void initialize() {
        ImplementationBridgeHelpers.CosmosExceptionHelper.setCosmosExceptionAccessor(
            new ImplementationBridgeHelpers.CosmosExceptionHelper.CosmosExceptionAccessor() {
                @Override
                public CosmosException createCosmosException(int statusCode, Exception innerException) {
                    return new CosmosException(statusCode, innerException);
                }

                @Override
                public List<String> getReplicaStatusList(CosmosException cosmosException) {
                    return cosmosException.getReplicaStatusList();
                }

                @Override
                public CosmosException setRntbdChannelStatistics(
                    CosmosException cosmosException,
                    RntbdChannelStatistics rntbdChannelStatistics) {
                    cosmosException.setRntbdChannelStatistics(rntbdChannelStatistics);
                    return cosmosException;
                }

                @Override
                public RntbdChannelStatistics getRntbdChannelStatistics(CosmosException cosmosException) {
                    return cosmosException.getRntbdChannelStatistics();
                }

                @Override
                public void setFaultInjectionRuleId(CosmosException cosmosException, String faultInjectionRuleId) {
                    cosmosException.setFaultInjectionRuleId(faultInjectionRuleId);
                }

                @Override
                public String getFaultInjectionRuleId(CosmosException cosmosException) {
                    return cosmosException.getFaultInjectionRuleId();
                }
            });
    }

    // Ensure the accessor bridge is registered as soon as the class is loaded.
    static { initialize(); }
}
class CosmosException extends AzureException { private static final long MAX_RETRY_AFTER_IN_MS = BatchExecUtils.MAX_RETRY_AFTER_IN_MS; private static final long serialVersionUID = 1L; private static final ObjectMapper mapper = new ObjectMapper(); private final static String USER_AGENT = Utils.getUserAgent(); /** * Status code */ private final int statusCode; /** * Response headers */ private final Map<String, String> responseHeaders; /** * Cosmos diagnostics */ private CosmosDiagnostics cosmosDiagnostics; /** * Request timeline */ private RequestTimeline requestTimeline; /** * Channel acquisition timeline */ private RntbdChannelAcquisitionTimeline channelAcquisitionTimeline; /** * Cosmos error */ private CosmosError cosmosError; /** * RNTBD endpoint statistics */ private RntbdEndpointStatistics rntbdEndpointStatistics; /** * RNTBD endpoint statistics */ private RntbdChannelStatistics rntbdChannelStatistics; /** * LSN */ long lsn; /** * Partition key range ID */ String partitionKeyRangeId; /** * Request headers */ Map<String, String> requestHeaders; /** * Request URI */ Uri requestUri; /** * Resource address */ String resourceAddress; /** * Request payload length */ private int requestPayloadLength; /** * RNTBD request length */ private int rntbdRequestLength; /** * RNTBD response length */ private int rntbdResponseLength; /** * Sending request has started */ private boolean sendingRequestHasStarted; /*** * All selectable replica status. */ private final List<String> replicaStatusList = new ArrayList<>(); /** * Fault injection ruleId */ private String faultInjectionRuleId; /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param message the string message. * @param responseHeaders the response headers. 
* @param cause the inner exception */ protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) { super(message, cause); this.statusCode = statusCode; this.responseHeaders = new ConcurrentHashMap<>(); if (responseHeaders != null) { for (Map.Entry<String, String> entry: responseHeaders.entrySet()) { if (entry.getKey() != null && entry.getValue() != null) { this.responseHeaders.put(entry.getKey(), entry.getValue()); } } } } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param errorMessage the error message. */ protected CosmosException(int statusCode, String errorMessage) { this(statusCode, errorMessage, null, null); this.cosmosError = new CosmosError(); ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param innerException the original exception. */ protected CosmosException(int statusCode, Exception innerException) { this(statusCode, null, null, innerException); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. */ protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) { this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders); } /** * Creates a new instance of the CosmosException class. * * @param resourceAddress the address of the resource the request is associated with. * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. 
*/ protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) { this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null); this.resourceAddress = resourceAddress; this.cosmosError = cosmosErrorResource; } /** * Creates a new instance of the CosmosException class. * * @param resourceAddress the address of the resource the request is associated with. * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. * @param cause the inner exception */ protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders, Throwable cause) { this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, cause); this.resourceAddress = resourceAddress; this.cosmosError = cosmosErrorResource; } /** * Creates a new instance of the CosmosException class. * * @param message the string message. * @param statusCode the http status code of the response. * @param exception the exception object. * @param responseHeaders the response headers. * @param resourceAddress the address of the resource the request is associated with. 
*/ protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode, String resourceAddress) { this(statusCode, message, responseHeaders, exception); this.resourceAddress = resourceAddress; } @Override public String getMessage() { try { ObjectNode messageNode = mapper.createObjectNode(); messageNode.put("innerErrorMessage", innerErrorMessage()); if (cosmosDiagnostics != null) { cosmosDiagnostics.fillCosmosDiagnostics(messageNode, null); } return mapper.writeValueAsString(messageNode); } catch (JsonProcessingException e) { if (cosmosDiagnostics == null) { return innerErrorMessage(); } return innerErrorMessage() + ", " + cosmosDiagnostics.toString(); } } /** * Gets the activity ID associated with the request. * * @return the activity ID. */ public String getActivityId() { if (this.responseHeaders != null) { return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID); } return null; } /** * Gets the http status code. * * @return the status code. */ public int getStatusCode() { return this.statusCode; } /** * Gets the sub status code. * * @return the status code. */ public int getSubStatusCode() { int code = HttpConstants.SubStatusCodes.UNKNOWN; if (this.responseHeaders != null) { String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS); if (StringUtils.isNotEmpty(subStatusString)) { try { code = Integer.parseInt(subStatusString); } catch (NumberFormatException e) { } } } return code; } void setSubStatusCode(int subStatusCode) { this.responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, Integer.toString(subStatusCode)); } /** * Gets the error code associated with the exception. * * @return the error. 
*/ CosmosError getError() { return this.cosmosError; } void setError(CosmosError cosmosError) { this.cosmosError = cosmosError; } /** * Gets the recommended time duration after which the client can retry failed * requests * * @return the recommended time duration after which the client can retry failed * requests. */ public Duration getRetryAfterDuration() { long retryIntervalInMilliseconds = 0; if (this.responseHeaders != null) { String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS); if (StringUtils.isNotEmpty(header)) { try { retryIntervalInMilliseconds = Math.min(Long.parseLong(header), MAX_RETRY_AFTER_IN_MS); } catch (NumberFormatException e) { } } } return Duration.ofMillis(retryIntervalInMilliseconds); } /** * Gets the response headers as key-value pairs * * @return the response headers */ public Map<String, String> getResponseHeaders() { return this.responseHeaders; } /** * Gets the resource address associated with this exception. * * @return the resource address associated with this exception. */ String getResourceAddress() { return this.resourceAddress; } /** * Gets the Cosmos Diagnostic Statistics associated with this exception. * * @return Cosmos Diagnostic Statistics associated with this exception. */ public CosmosDiagnostics getDiagnostics() { return cosmosDiagnostics; } CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) { this.cosmosDiagnostics = cosmosDiagnostics; return this; } /** * Gets the request charge as request units (RU) consumed by the operation. * <p> * For more information about the RU and factors that can impact the effective charges please visit * <a href="https: * * @return the request charge. 
*/ public double getRequestCharge() { String value = this.getResponseHeaders().get(HttpConstants.HttpHeaders.REQUEST_CHARGE); if (StringUtils.isEmpty(value)) { return 0; } return Double.parseDouble(value); } @Override String innerErrorMessage() { String innerErrorMessage = super.getMessage(); if (cosmosError != null) { innerErrorMessage = cosmosError.getMessage(); if (innerErrorMessage == null) { innerErrorMessage = String.valueOf( ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors")); } } return innerErrorMessage; } private String causeInfo() { Throwable cause = getCause(); if (cause != null) { return String.format("[class: %s, message: %s]", cause.getClass(), cause.getMessage()); } return null; } private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) { if (requestHeaders == null) { return null; } return requestHeaders.entrySet().stream().filter(entry -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(entry.getKey())) .collect(Collectors.toList()); } RequestTimeline getRequestTimeline() { return this.requestTimeline; } void setRequestTimeline(RequestTimeline requestTimeline) { this.requestTimeline = requestTimeline; } RntbdChannelAcquisitionTimeline getChannelAcquisitionTimeline() { return this.channelAcquisitionTimeline; } void setChannelAcquisitionTimeline(RntbdChannelAcquisitionTimeline channelAcquisitionTimeline) { this.channelAcquisitionTimeline = channelAcquisitionTimeline; } void setResourceAddress(String resourceAddress) { this.resourceAddress = resourceAddress; } void setRntbdServiceEndpointStatistics(RntbdEndpointStatistics rntbdEndpointStatistics) { this.rntbdEndpointStatistics = rntbdEndpointStatistics; } RntbdEndpointStatistics getRntbdServiceEndpointStatistics() { return this.rntbdEndpointStatistics; } RntbdChannelStatistics getRntbdChannelStatistics() { return this.rntbdChannelStatistics; } void setRntbdChannelStatistics(RntbdChannelStatistics rntbdChannelStatistics) { 
this.rntbdChannelStatistics = rntbdChannelStatistics; } void setRntbdRequestLength(int rntbdRequestLength) { this.rntbdRequestLength = rntbdRequestLength; } int getRntbdRequestLength() { return this.rntbdRequestLength; } void setRntbdResponseLength(int rntbdResponseLength) { this.rntbdResponseLength = rntbdResponseLength; } int getRntbdResponseLength() { return this.rntbdResponseLength; } void setRequestPayloadLength(int requestBodyLength) { this.requestPayloadLength = requestBodyLength; } int getRequestPayloadLength() { return this.requestPayloadLength; } boolean hasSendingRequestStarted() { return this.sendingRequestHasStarted; } void setSendingRequestHasStarted(boolean hasSendingRequestStarted) { this.sendingRequestHasStarted = hasSendingRequestStarted; } void setFaultInjectionRuleId(String faultInjectionRUleId) { this.faultInjectionRuleId = faultInjectionRUleId; } String getFaultInjectionRuleId() { return this.faultInjectionRuleId; } List<String> getReplicaStatusList() { return this.replicaStatusList; } static void initialize() { ImplementationBridgeHelpers.CosmosExceptionHelper.setCosmosExceptionAccessor( new ImplementationBridgeHelpers.CosmosExceptionHelper.CosmosExceptionAccessor() { @Override public CosmosException createCosmosException(int statusCode, Exception innerException) { return new CosmosException(statusCode, innerException); } @Override public List<String> getReplicaStatusList(CosmosException cosmosException) { return cosmosException.getReplicaStatusList(); } @Override public CosmosException setRntbdChannelStatistics( CosmosException cosmosException, RntbdChannelStatistics rntbdChannelStatistics) { cosmosException.setRntbdChannelStatistics(rntbdChannelStatistics); return cosmosException; } @Override public RntbdChannelStatistics getRntbdChannelStatistics(CosmosException cosmosException) { return cosmosException.getRntbdChannelStatistics(); } @Override public void setFaultInjectionRuleId(CosmosException cosmosException, String faultInjectionRuleId) 
{ cosmosException.setFaultInjectionRuleId(faultInjectionRuleId); } @Override public String getFaultInjectionRuleId(CosmosException cosmosException) { return cosmosException.getFaultInjectionRuleId(); } }); } static { initialize(); } }
is this okie?
public static FaultInjectionRuleAccessor getFaultInjectionRuleAccessor() { if (!faultInjectionRuleClassLoaded.get()) { logger.debug("Initializing FaultInjectionRuleAccessor..."); } FaultInjectionRuleAccessor snapshot = accessor.get(); if (snapshot == null) { logger.error("FaultInjectionRuleAccessor is not initialized yet!"); System.exit(8700); } return snapshot; }
System.exit(8700);
public static FaultInjectionRuleAccessor getFaultInjectionRuleAccessor() { if (!faultInjectionRuleClassLoaded.get()) { logger.debug("Initializing FaultInjectionRuleAccessor..."); } FaultInjectionRuleAccessor snapshot = accessor.get(); if (snapshot == null) { logger.error("FaultInjectionRuleAccessor is not initialized yet!"); System.exit(8700); } return snapshot; }
class FaultInjectionRuleHelper { private final static AtomicBoolean faultInjectionRuleClassLoaded = new AtomicBoolean(false); private final static AtomicReference<FaultInjectionRuleAccessor> accessor = new AtomicReference<>(); private FaultInjectionRuleHelper() { } public static void setFaultInjectionRuleAccessor( final FaultInjectionRuleAccessor newAccessor) { assert(newAccessor != null); if (!accessor.compareAndSet(null, newAccessor)) { logger.debug("FaultInjectionRuleAccessor already initialized!"); } else { logger.debug("Setting FaultInjectionRuleAccessor..."); faultInjectionRuleClassLoaded.set(true); } } public interface FaultInjectionRuleAccessor { void setEffectiveFaultInjectionRule(FaultInjectionRule rule, IFaultInjectionRuleInternal ruleInternal); } }
class FaultInjectionRuleHelper { private static final AtomicBoolean faultInjectionRuleClassLoaded = new AtomicBoolean(false); private static final AtomicReference<FaultInjectionRuleAccessor> accessor = new AtomicReference<>(); private FaultInjectionRuleHelper() { } public static void setFaultInjectionRuleAccessor( final FaultInjectionRuleAccessor newAccessor) { assert (newAccessor != null); if (!accessor.compareAndSet(null, newAccessor)) { logger.debug("FaultInjectionRuleAccessor already initialized!"); } else { logger.debug("Setting FaultInjectionRuleAccessor..."); faultInjectionRuleClassLoaded.set(true); } } public interface FaultInjectionRuleAccessor { void setEffectiveFaultInjectionRule(FaultInjectionRule rule, IFaultInjectionRuleInternal ruleInternal); } }
This should add null checking
public static Context enableSyncRestProxy(Context context) { return context.addData(HTTP_REST_PROXY_SYNC_PROXY_ENABLE, true); }
return context.addData(HTTP_REST_PROXY_SYNC_PROXY_ENABLE, true);
public static Context enableSyncRestProxy(Context context) { context = context == null ? Context.NONE : context; return context.addData(HTTP_REST_PROXY_SYNC_PROXY_ENABLE, true); }
class Utility { private static final String HTTP_REST_PROXY_SYNC_PROXY_ENABLE = "com.azure.core.http.restproxy.syncproxy.enable"; public static final String APP_CONFIG_TRACING_NAMESPACE_VALUE = "Microsoft.AppConfiguration"; static final String ID = "id"; static final String DESCRIPTION = "description"; static final String DISPLAY_NAME = "display_name"; static final String ENABLED = "enabled"; static final String CONDITIONS = "conditions"; static final String CLIENT_FILTERS = "client_filters"; static final String NAME = "name"; static final String PARAMETERS = "parameters"; static final String URI = "uri"; /** * Represents any value in Etag. */ public static final String ETAG_ANY = "*"; /* * Translate public ConfigurationSetting to KeyValue autorest generated class. */ public static KeyValue toKeyValue(ConfigurationSetting setting) { return new KeyValue() .setKey(setting.getKey()) .setValue(setting.getValue()) .setLabel(setting.getLabel()) .setContentType(setting.getContentType()) .setEtag(setting.getETag()) .setLastModified(setting.getLastModified()) .setLocked(setting.isReadOnly()) .setTags(setting.getTags()); } public static SettingFields[] toSettingFieldsArray(List<KeyValueFields> kvFieldsList) { return kvFieldsList.stream() .map(keyValueFields -> toSettingFields(keyValueFields)) .collect(Collectors.toList()) .toArray(new SettingFields[kvFieldsList.size()]); } public static SettingFields toSettingFields(KeyValueFields keyValueFields) { return keyValueFields == null ? null : SettingFields.fromString(keyValueFields.toString()); } public static List<KeyValueFields> toKeyValueFieldsList(SettingFields[] settingFieldsArray) { return Arrays.stream(settingFieldsArray) .map(settingFields -> toKeyValueFields(settingFields)) .collect(Collectors.toList()); } public static KeyValueFields toKeyValueFields(SettingFields settingFields) { return settingFields == null ? 
null : KeyValueFields.fromString(settingFields.toString()); } /* * Azure Configuration service requires that the ETag value is surrounded in quotation marks. * * @param ETag The ETag to get the value for. If null is pass in, an empty string is returned. * @return The ETag surrounded by quotations. (ex. "ETag") */ private static String getETagValue(String etag) { return (etag == null || "*".equals(etag)) ? etag : "\"" + etag + "\""; } /* * Get HTTP header value, if-match. Used to perform an operation only if the targeted resource's etag matches the * value provided. */ public static String getIfMatchETag(boolean ifUnchanged, ConfigurationSetting setting) { return ifUnchanged ? getETagValue(setting.getETag()) : null; } /* * Get HTTP header value, if-none-match. Used to perform an operation only if the targeted resource's etag does not * match the value provided. */ public static String getIfNoneMatchETag(boolean onlyIfChanged, ConfigurationSetting setting) { return onlyIfChanged ? getETagValue(setting.getETag()) : null; } /* * Ensure that setting is not null. And, key cannot be null because it is part of the service REST URL. */ public static void validateSetting(ConfigurationSetting setting) { Objects.requireNonNull(setting); if (setting.getKey() == null) { throw new IllegalArgumentException("Parameter 'key' is required and cannot be null."); } } /** * Enable the sync stack rest proxy. * * @param context It offers a means of passing arbitrary data (key-value pairs) to pipeline policies. * Most applications do not need to pass arbitrary data to the pipeline and can pass Context.NONE or null. * * @return The Context. */ public static Context addTracingNamespace(Context context) { context = context == null ? Context.NONE : context; return context.addData(AZ_TRACING_NAMESPACE_KEY, APP_CONFIG_TRACING_NAMESPACE_VALUE); } }
class Utility { private static final String HTTP_REST_PROXY_SYNC_PROXY_ENABLE = "com.azure.core.http.restproxy.syncproxy.enable"; public static final String APP_CONFIG_TRACING_NAMESPACE_VALUE = "Microsoft.AppConfiguration"; static final String ID = "id"; static final String DESCRIPTION = "description"; static final String DISPLAY_NAME = "display_name"; static final String ENABLED = "enabled"; static final String CONDITIONS = "conditions"; static final String CLIENT_FILTERS = "client_filters"; static final String NAME = "name"; static final String PARAMETERS = "parameters"; static final String URI = "uri"; /** * Represents any value in Etag. */ public static final String ETAG_ANY = "*"; /* * Translate public ConfigurationSetting to KeyValue autorest generated class. */ public static KeyValue toKeyValue(ConfigurationSetting setting) { return new KeyValue() .setKey(setting.getKey()) .setValue(setting.getValue()) .setLabel(setting.getLabel()) .setContentType(setting.getContentType()) .setEtag(setting.getETag()) .setLastModified(setting.getLastModified()) .setLocked(setting.isReadOnly()) .setTags(setting.getTags()); } public static SettingFields[] toSettingFieldsArray(List<KeyValueFields> kvFieldsList) { int size = kvFieldsList.size(); SettingFields[] fields = new SettingFields[size]; for (int i = 0; i < size; i++) { fields[i] = toSettingFields(kvFieldsList.get(i)); } return fields; } public static SettingFields toSettingFields(KeyValueFields keyValueFields) { return keyValueFields == null ? null : SettingFields.fromString(keyValueFields.toString()); } public static List<KeyValueFields> toKeyValueFieldsList(SettingFields[] settingFieldsArray) { int size = settingFieldsArray.length; List<KeyValueFields> keyValueFields = new ArrayList<>(size); for (int i = 0; i < size; i++) { keyValueFields.add(toKeyValueFields(settingFieldsArray[i])); } return keyValueFields; } public static KeyValueFields toKeyValueFields(SettingFields settingFields) { return settingFields == null ? 
null : KeyValueFields.fromString(settingFields.toString()); } /* * Azure Configuration service requires that the ETag value is surrounded in quotation marks. * * @param ETag The ETag to get the value for. If null is pass in, an empty string is returned. * @return The ETag surrounded by quotations. (ex. "ETag") */ private static String getETagValue(String etag) { return (etag == null || "*".equals(etag)) ? etag : "\"" + etag + "\""; } /* * Get HTTP header value, if-match or if-none-match.. Used to perform an operation only if the targeted resource's * etag matches the value provided. */ public static String getEtag(boolean isEtagRequired, ConfigurationSetting setting) { return isEtagRequired ? getETagValue(setting.getETag()) : null; } /* * Ensure that setting is not null. And, key cannot be null because it is part of the service REST URL. */ public static void validateSetting(ConfigurationSetting setting) { Objects.requireNonNull(setting); if (setting.getKey() == null) { throw new IllegalArgumentException("Parameter 'key' is required and cannot be null."); } } /* * Asynchronously validate that setting and key is not null. The key is used in the service URL, * so it cannot be null. */ public static Mono<ConfigurationSetting> validateSettingAsync(ConfigurationSetting setting) { if (setting == null) { return Mono.error(new NullPointerException("Configuration setting cannot be null")); } if (setting.getKey() == null) { return Mono.error(new IllegalArgumentException("Parameter 'key' is required and cannot be null.")); } return Mono.just(setting); } /** * Enable the sync stack rest proxy. * * @param context It offers a means of passing arbitrary data (key-value pairs) to pipeline policies. * Most applications do not need to pass arbitrary data to the pipeline and can pass Context.NONE or null. * * @return The Context. */ public static Context addTracingNamespace(Context context) { context = context == null ? 
Context.NONE : context; return context.addData(AZ_TRACING_NAMESPACE_KEY, APP_CONFIG_TRACING_NAMESPACE_VALUE); } }
discussed offline, keep it is
public static FaultInjectionRuleAccessor getFaultInjectionRuleAccessor() { if (!faultInjectionRuleClassLoaded.get()) { logger.debug("Initializing FaultInjectionRuleAccessor..."); } FaultInjectionRuleAccessor snapshot = accessor.get(); if (snapshot == null) { logger.error("FaultInjectionRuleAccessor is not initialized yet!"); System.exit(8700); } return snapshot; }
System.exit(8700);
public static FaultInjectionRuleAccessor getFaultInjectionRuleAccessor() { if (!faultInjectionRuleClassLoaded.get()) { logger.debug("Initializing FaultInjectionRuleAccessor..."); } FaultInjectionRuleAccessor snapshot = accessor.get(); if (snapshot == null) { logger.error("FaultInjectionRuleAccessor is not initialized yet!"); System.exit(8700); } return snapshot; }
class FaultInjectionRuleHelper { private final static AtomicBoolean faultInjectionRuleClassLoaded = new AtomicBoolean(false); private final static AtomicReference<FaultInjectionRuleAccessor> accessor = new AtomicReference<>(); private FaultInjectionRuleHelper() { } public static void setFaultInjectionRuleAccessor( final FaultInjectionRuleAccessor newAccessor) { assert(newAccessor != null); if (!accessor.compareAndSet(null, newAccessor)) { logger.debug("FaultInjectionRuleAccessor already initialized!"); } else { logger.debug("Setting FaultInjectionRuleAccessor..."); faultInjectionRuleClassLoaded.set(true); } } public interface FaultInjectionRuleAccessor { void setEffectiveFaultInjectionRule(FaultInjectionRule rule, IFaultInjectionRuleInternal ruleInternal); } }
class FaultInjectionRuleHelper { private static final AtomicBoolean faultInjectionRuleClassLoaded = new AtomicBoolean(false); private static final AtomicReference<FaultInjectionRuleAccessor> accessor = new AtomicReference<>(); private FaultInjectionRuleHelper() { } public static void setFaultInjectionRuleAccessor( final FaultInjectionRuleAccessor newAccessor) { assert (newAccessor != null); if (!accessor.compareAndSet(null, newAccessor)) { logger.debug("FaultInjectionRuleAccessor already initialized!"); } else { logger.debug("Setting FaultInjectionRuleAccessor..."); faultInjectionRuleClassLoaded.set(true); } } public interface FaultInjectionRuleAccessor { void setEffectiveFaultInjectionRule(FaultInjectionRule rule, IFaultInjectionRuleInternal ruleInternal); } }
Might be pedantic but I would re-order these checks to start with length validation followed by `startsWith` validation, finishing with SHA validation, as this is the order of which is cheapest to validate.
public static void validateDigest(MessageDigest messageDigest, String requestedDigest) { String sha256 = byteArrayToHex(messageDigest.digest()); if (!requestedDigest.endsWith(sha256) || !requestedDigest.startsWith("sha256:") || requestedDigest.length() != 71) { throw LOGGER.atError() .addKeyValue("requestedDigest", requestedDigest) .addKeyValue("actualDigest", () -> "sha256:" + sha256) .log(new ServiceResponseException("The digest in the response does not match the expected digest.")); } }
|| requestedDigest.length() != 71) {
public static void validateDigest(MessageDigest messageDigest, String requestedDigest) { String sha256 = byteArrayToHex(messageDigest.digest()); if (requestedDigest.length() != 71 || !requestedDigest.startsWith("sha256:") || !requestedDigest.endsWith(sha256)) { throw LOGGER.atError() .addKeyValue("requestedDigest", requestedDigest) .addKeyValue("actualDigest", () -> "sha256:" + sha256) .log(new ServiceResponseException("The digest in the response does not match the expected digest.")); } }
/**
 * Internal utilities shared by the Container Registry clients: HTTP pipeline construction,
 * digest computation, error mapping, and pagination helpers.
 */
class UtilsImpl {
    private static final ClientLogger LOGGER = new ClientLogger(UtilsImpl.class);
    private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-containers-containerregistry.properties");
    private static final String CLIENT_NAME = PROPERTIES.getOrDefault("name", "UnknownName");
    private static final String CLIENT_VERSION = PROPERTIES.getOrDefault("version", "UnknownVersion");
    private static final int HTTP_STATUS_CODE_NOT_FOUND = 404;
    private static final int HTTP_STATUS_CODE_ACCEPTED = 202;
    // Pagination continuation is returned by the service in the RFC 5988 'Link' response header.
    private static final HttpHeaderName CONTINUATION_LINK_HEADER_NAME = HttpHeaderName.fromString("Link");
    private static final String HTTP_REST_PROXY_SYNC_PROXY_ENABLE = "com.azure.core.http.restproxy.syncproxy.enable";
    public static final HttpHeaderName DOCKER_DIGEST_HEADER_NAME = HttpHeaderName.fromString("docker-content-digest");
    public static final String OCI_MANIFEST_MEDIA_TYPE = "application/vnd.oci.image.manifest.v1+json";
    public static final String CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE = "Microsoft.ContainerRegistry";
    // Chunk size (4 MiB) used for chunked blob transfers.
    public static final int CHUNK_SIZE = 4 * 1024 * 1024;

    // Utility class; not instantiable.
    private UtilsImpl() {
    }

    /**
     * This method builds the httpPipeline for the builders.
     * @param clientOptions The client options
     * @param logOptions http log options.
     * @param configuration configuration settings.
     * @param retryPolicy retry policy
     * @param retryOptions retry options
     * @param credential credentials; may be null, which enables anonymous access.
     * @param audience the audience used to scope authentication tokens.
     * @param perCallPolicies per call policies.
     * @param perRetryPolicies per retry policies.
     * @param httpClient http client
     * @param endpoint endpoint to be called
     * @param serviceVersion the service api version being targeted by the client.
     * @return returns the httpPipeline to be consumed by the builders.
     */
    public static HttpPipeline buildHttpPipeline(
        ClientOptions clientOptions,
        HttpLogOptions logOptions,
        Configuration configuration,
        RetryPolicy retryPolicy,
        RetryOptions retryOptions,
        TokenCredential credential,
        ContainerRegistryAudience audience,
        List<HttpPipelinePolicy> perCallPolicies,
        List<HttpPipelinePolicy> perRetryPolicies,
        HttpClient httpClient,
        String endpoint,
        ContainerRegistryServiceVersion serviceVersion) {
        ArrayList<HttpPipelinePolicy> policies = new ArrayList<>();
        policies.add(
            new UserAgentPolicy(CoreUtils.getApplicationId(clientOptions, logOptions), CLIENT_NAME, CLIENT_VERSION,
                configuration));
        policies.add(new RequestIdPolicy());
        policies.addAll(perCallPolicies);
        HttpPolicyProviders.addBeforeRetryPolicies(policies);
        policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions));
        policies.add(new CookiePolicy());
        policies.add(new AddDatePolicy());
        policies.add(new ContainerRegistryRedirectPolicy());
        policies.addAll(perRetryPolicies);
        HttpPolicyProviders.addAfterRetryPolicies(policies);
        HttpLoggingPolicy loggingPolicy = new HttpLoggingPolicy(logOptions);
        if (credential == null) {
            LOGGER.verbose("Credentials are null, enabling anonymous access");
        }
        // The token service gets its own pipeline: same policies plus logging, but without the
        // credentials policy (which would recurse into the token service itself).
        ArrayList<HttpPipelinePolicy> credentialPolicies = clone(policies);
        credentialPolicies.add(loggingPolicy);
        if (audience == null) {
            audience = ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD;
        }
        ContainerRegistryTokenService tokenService = new ContainerRegistryTokenService(
            credential,
            audience,
            endpoint,
            serviceVersion,
            new HttpPipelineBuilder()
                .policies(credentialPolicies.toArray(new HttpPipelinePolicy[0]))
                .httpClient(httpClient)
                .build(),
            JacksonAdapter.createDefaultSerializerAdapter());
        ContainerRegistryCredentialsPolicy credentialsPolicy = new ContainerRegistryCredentialsPolicy(tokenService);
        policies.add(credentialsPolicy);
        policies.add(loggingPolicy);
        HttpPipeline httpPipeline = new HttpPipelineBuilder()
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .httpClient(httpClient)
            .build();
        return httpPipeline;
    }

    // Shallow copy so the token-service pipeline and the main pipeline can diverge.
    @SuppressWarnings("unchecked")
    private static ArrayList<HttpPipelinePolicy> clone(ArrayList<HttpPipelinePolicy> policies) {
        return (ArrayList<HttpPipelinePolicy>) policies.clone();
    }

    /**
     * This method computes the digest for the buffer content.
     * Docker digest is a SHA256 hash of the docker image content and is deterministic based on the image build.
     * @param buffer The buffer containing the image bytes; read through a read-only duplicate so the
     *               caller's position is untouched.
     * @return SHA-256 digest for the given buffer, prefixed with "sha256:".
     */
    public static String computeDigest(ByteBuffer buffer) {
        ByteBuffer readOnlyBuffer = buffer.asReadOnlyBuffer();
        try {
            MessageDigest md = MessageDigest.getInstance("SHA-256");
            md.update(readOnlyBuffer);
            byte[] digest = md.digest();
            return "sha256:" + byteArrayToHex(digest);
        } catch (NoSuchAlgorithmException e) {
            // SHA-256 is mandated by the JCA spec; this branch is effectively unreachable.
            throw LOGGER.logExceptionAsError(new RuntimeException(e));
        }
    }

    /** Creates a fresh SHA-256 MessageDigest; SHA-256 is guaranteed by the JCA spec. */
    public static MessageDigest createSha256() {
        try {
            return MessageDigest.getInstance("SHA-256");
        } catch (NoSuchAlgorithmException e) {
            throw LOGGER.logExceptionAsError(new RuntimeException(e));
        }
    }

    private static final char[] HEX_ARRAY = "0123456789abcdef".toCharArray();

    /** Lowercase hex encoding of {@code bytes}, two chars per byte. */
    public static String byteArrayToHex(byte[] bytes) {
        char[] hexChars = new char[bytes.length * 2];
        for (int j = 0; j < bytes.length; j++) {
            int v = bytes[j] & 0xFF;
            hexChars[j * 2] = HEX_ARRAY[v >>> 4];
            hexChars[j * 2 + 1] = HEX_ARRAY[v & 0x0F];
        }
        return new String(hexChars);
    }

    /**
     * Delete operation should be idempotent.
     * And so should result in a success in case the service response is 404 (Not Found).
     * @param responseT The response object.
     * @param <T> The encapsulating value.
     * @return The transformed response object (404 is rewritten to 202 Accepted).
     */
    public static <T> Response<Void> deleteResponseToSuccess(Response<T> responseT) {
        if (responseT.getStatusCode() != HTTP_STATUS_CODE_NOT_FOUND) {
            // Pass-through: keep the original status code.
            return getAcceptedDeleteResponse(responseT, responseT.getStatusCode());
        }
        // 404 means the resource is already gone — treat the delete as successful.
        return getAcceptedDeleteResponse(responseT, HTTP_STATUS_CODE_ACCEPTED);
    }

    private static <T> Response<Void> getAcceptedDeleteResponse(Response<T> responseT, int statusCode) {
        return new SimpleResponse<Void>(
            responseT.getRequest(),
            statusCode,
            responseT.getHeaders(),
            null);
    }

    /**
     * This method converts the API response codes into well known exceptions.
     * Unwraps one level of RuntimeException cause to find an HttpResponseException.
     * @param exception The exception returned by the rest client.
     * @return The exception returned by the public methods.
     */
    public static Throwable mapException(Throwable exception) {
        HttpResponseException acrException = null;
        if (exception instanceof HttpResponseException) {
            acrException = ((HttpResponseException) exception);
        } else if (exception instanceof RuntimeException) {
            RuntimeException runtimeException = (RuntimeException) exception;
            Throwable throwable = runtimeException.getCause();
            if (throwable instanceof HttpResponseException) {
                acrException = (HttpResponseException) throwable;
            }
        }
        if (acrException == null) {
            // Not an HTTP failure we recognize — return unchanged.
            return exception;
        }
        return mapAcrErrorsException(acrException);
    }

    /** Maps well-known HTTP status codes to azure-core's typed exception hierarchy. */
    public static HttpResponseException mapAcrErrorsException(HttpResponseException acrException) {
        final HttpResponse errorHttpResponse = acrException.getResponse();
        final int statusCode = errorHttpResponse.getStatusCode();
        final String errorDetail = acrException.getMessage();
        switch (statusCode) {
            case 401:
                return new ClientAuthenticationException(errorDetail, acrException.getResponse(), acrException);
            case 404:
                return new ResourceNotFoundException(errorDetail, acrException.getResponse(), acrException);
            case 409:
                return new ResourceExistsException(errorDetail, acrException.getResponse(), acrException);
            case 412:
                return new ResourceModifiedException(errorDetail, acrException.getResponse(), acrException);
            default:
                return new HttpResponseException(errorDetail, acrException.getResponse(), acrException);
        }
    }

    /**
     * This method parses the response to get the continuation token used to make the next pagination call.
     * The continuation token is returned by the service in the form of a header and not as a nextLink field.
     * @param listResponse response that is parsed.
     * @param <T> the model type that is being operated on.
     * @return paged response with the correct continuation token.
     */
    public static <T> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<T> listResponse) {
        return getPagedResponseWithContinuationToken(listResponse, values -> values);
    }

    /**
     * This method parses the response to get the continuation token used to make the next pagination call.
     * The continuation token is returned by the service in the form of a header and not as a nextLink field.
     *
     * <p>
     * Per the Docker Registry HTTP API V2 spec, the Link header is an RFC 5988
     * compliant rel='next' with URL to next result set, if available.
     *
     * The URI reference can be obtained from link-value as follows:
     * Link = "Link" ":" #link-value
     * link-value = "&lt;" URI-Reference "&gt;" *(";" link-param)
     * (See RFC 5988, "Web Linking".)
     * </p>
     * @param listResponse response that is parsed.
     * @param mapperFunction the function that maps the rest api response into the public model exposed by the client.
     * @param <T> The model type returned by the rest client.
     * @param <R> The model type returned by the public client.
     * @return paged response with the correct continuation token.
     */
    public static <T, R> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<R> listResponse,
        Function<List<R>, List<T>> mapperFunction) {
        Objects.requireNonNull(mapperFunction);
        String continuationLink = getContinuationLink(listResponse.getHeaders());
        List<T> values = mapperFunction.apply(listResponse.getValue());
        return new PagedResponseBase<String, T>(
            listResponse.getRequest(),
            listResponse.getStatusCode(),
            listResponse.getHeaders(),
            values,
            continuationLink,
            null
        );
    }

    // Extracts the URI between '<' and ">;" from the 'Link' header, or null when absent.
    private static String getContinuationLink(HttpHeaders headers) {
        String continuationLinkHeader = headers.getValue(CONTINUATION_LINK_HEADER_NAME);
        if (!CoreUtils.isNullOrEmpty(continuationLinkHeader) && continuationLinkHeader.charAt(0) == '<') {
            int endIndex = continuationLinkHeader.indexOf(">;");
            if (endIndex < 2) {
                LOGGER.warning("unexpected 'Link' header value - '{}'", continuationLinkHeader);
            }
            // NOTE(review): when ">;" is missing, endIndex is -1 and the substring below throws
            // StringIndexOutOfBoundsException; consider returning null after the warning instead.
            return continuationLinkHeader.substring(1, endIndex);
        }
        return null;
    }

    /** Maps the generated manifest-attribute models to the public ArtifactManifestProperties model. */
    public static List<ArtifactManifestProperties> mapManifestsProperties(List<ManifestAttributesBase> baseArtifacts,
        String repositoryName, String registryLoginServer) {
        if (baseArtifacts == null) {
            return null;
        }
        return baseArtifacts.stream().map(value -> {
            ArtifactManifestProperties manifestProperties = new ArtifactManifestProperties()
                .setDeleteEnabled(value.isDeleteEnabled())
                .setListEnabled(value.isListEnabled())
                .setWriteEnabled(value.isWriteEnabled())
                .setReadEnabled(value.isReadEnabled());
            ArtifactManifestPropertiesHelper.setRepositoryName(manifestProperties, repositoryName);
            ArtifactManifestPropertiesHelper.setRegistryLoginServer(manifestProperties, registryLoginServer);
            ArtifactManifestPropertiesHelper.setDigest(manifestProperties, value.getDigest());
            ArtifactManifestPropertiesHelper.setRelatedArtifacts(manifestProperties, value.getRelatedArtifacts());
            ArtifactManifestPropertiesHelper.setCpuArchitecture(manifestProperties, value.getArchitecture());
            ArtifactManifestPropertiesHelper.setOperatingSystem(manifestProperties, value.getOperatingSystem());
            ArtifactManifestPropertiesHelper.setCreatedOn(manifestProperties, value.getCreatedOn());
            ArtifactManifestPropertiesHelper.setlastUpdatedOn(manifestProperties, value.getLastUpdatedOn());
            ArtifactManifestPropertiesHelper.setSizeInBytes(manifestProperties, value.getSize());
            ArtifactManifestPropertiesHelper.setTags(manifestProperties, value.getTags());
            return manifestProperties;
        }).collect(Collectors.toList());
    }

    /** Maps the generated tag-attribute models to the public ArtifactTagProperties model. */
    public static List<ArtifactTagProperties> getTagProperties(List<TagAttributesBase> baseValues,
        String repositoryName) {
        Objects.requireNonNull(baseValues);
        return baseValues.stream().map(value -> {
            ArtifactTagProperties tagProperties = new ArtifactTagProperties()
                .setDeleteEnabled(value.isDeleteEnabled())
                .setReadEnabled(value.isReadEnabled())
                .setListEnabled(value.isListEnabled())
                .setWriteEnabled(value.isWriteEnabled());
            ArtifactTagPropertiesHelper.setCreatedOn(tagProperties, value.getCreatedOn());
            ArtifactTagPropertiesHelper.setlastUpdatedOn(tagProperties, value.getLastUpdatedOn());
            ArtifactTagPropertiesHelper.setRepositoryName(tagProperties, repositoryName);
            ArtifactTagPropertiesHelper.setName(tagProperties, value.getName());
            ArtifactTagPropertiesHelper.setDigest(tagProperties, value.getDigest());
            return tagProperties;
        }).collect(Collectors.toList());
    }

    /**
     * Get the digest from the response header if available.
     * @param headers The headers to parse.
     * @return The digest value, or null if the header is absent.
     */
    // NOTE(review): the type parameter <T> is unused here.
    public static <T> String getDigestFromHeader(HttpHeaders headers) {
        return headers.getValue(DOCKER_DIGEST_HEADER_NAME);
    }

    /** Flags the context so rest-proxy executes this call on the synchronous code path. */
    public static Context enableSync(Context tracingContext) {
        return tracingContext.addData(HTTP_REST_PROXY_SYNC_PROXY_ENABLE, true);
    }

    /** Tags the context with the Container Registry tracing namespace. */
    public static Context getTracingContext(Context context) {
        return context.addData(AZ_TRACING_NAMESPACE_KEY, CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE);
    }

    /** Strips a leading '/' so the location header can be used as a relative next link. */
    public static String trimNextLink(String locationHeader) {
        if (locationHeader != null && locationHeader.startsWith("/")) {
            return locationHeader.substring(1);
        }
        return locationHeader;
    }

    /**
     * Parses the total blob size from a Content-Range header ("bytes start-end/total").
     * @throws ServiceResponseException when the header is missing or malformed.
     */
    public static long getBlobSize(HttpHeader contentRangeHeader) {
        if (contentRangeHeader != null) {
            int slashInd = contentRangeHeader.getValue().indexOf('/');
            if (slashInd > 0) {
                // Everything after '/' is the total size.
                return Long.parseLong(contentRangeHeader.getValue().substring(slashInd + 1));
            }
        }
        throw LOGGER.logExceptionAsError(new ServiceResponseException("Invalid content-range header in response -"
            + contentRangeHeader));
    }
}
/**
 * Internal utilities shared by the Container Registry clients: HTTP pipeline construction,
 * digest computation, error mapping, and pagination helpers.
 */
class UtilsImpl {
    private static final ClientLogger LOGGER = new ClientLogger(UtilsImpl.class);
    private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-containers-containerregistry.properties");
    private static final String CLIENT_NAME = PROPERTIES.getOrDefault("name", "UnknownName");
    private static final String CLIENT_VERSION = PROPERTIES.getOrDefault("version", "UnknownVersion");
    private static final int HTTP_STATUS_CODE_NOT_FOUND = 404;
    private static final int HTTP_STATUS_CODE_ACCEPTED = 202;
    // Pagination continuation is returned by the service in the RFC 5988 'Link' response header.
    private static final HttpHeaderName CONTINUATION_LINK_HEADER_NAME = HttpHeaderName.fromString("Link");
    private static final String HTTP_REST_PROXY_SYNC_PROXY_ENABLE = "com.azure.core.http.restproxy.syncproxy.enable";
    public static final HttpHeaderName DOCKER_DIGEST_HEADER_NAME = HttpHeaderName.fromString("docker-content-digest");
    public static final String OCI_MANIFEST_MEDIA_TYPE = "application/vnd.oci.image.manifest.v1+json";
    public static final String CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE = "Microsoft.ContainerRegistry";
    // Chunk size (4 MiB) used for chunked blob transfers.
    public static final int CHUNK_SIZE = 4 * 1024 * 1024;

    // Utility class; not instantiable.
    private UtilsImpl() {
    }

    /**
     * This method builds the httpPipeline for the builders.
     * @param clientOptions The client options
     * @param logOptions http log options.
     * @param configuration configuration settings.
     * @param retryPolicy retry policy
     * @param retryOptions retry options
     * @param credential credentials; may be null, which enables anonymous access.
     * @param audience the audience used to scope authentication tokens.
     * @param perCallPolicies per call policies.
     * @param perRetryPolicies per retry policies.
     * @param httpClient http client
     * @param endpoint endpoint to be called
     * @param serviceVersion the service api version being targeted by the client.
     * @return returns the httpPipeline to be consumed by the builders.
     */
    public static HttpPipeline buildHttpPipeline(
        ClientOptions clientOptions,
        HttpLogOptions logOptions,
        Configuration configuration,
        RetryPolicy retryPolicy,
        RetryOptions retryOptions,
        TokenCredential credential,
        ContainerRegistryAudience audience,
        List<HttpPipelinePolicy> perCallPolicies,
        List<HttpPipelinePolicy> perRetryPolicies,
        HttpClient httpClient,
        String endpoint,
        ContainerRegistryServiceVersion serviceVersion) {
        ArrayList<HttpPipelinePolicy> policies = new ArrayList<>();
        policies.add(
            new UserAgentPolicy(CoreUtils.getApplicationId(clientOptions, logOptions), CLIENT_NAME, CLIENT_VERSION,
                configuration));
        policies.add(new RequestIdPolicy());
        policies.addAll(perCallPolicies);
        HttpPolicyProviders.addBeforeRetryPolicies(policies);
        policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions));
        policies.add(new CookiePolicy());
        policies.add(new AddDatePolicy());
        policies.add(new ContainerRegistryRedirectPolicy());
        policies.addAll(perRetryPolicies);
        HttpPolicyProviders.addAfterRetryPolicies(policies);
        HttpLoggingPolicy loggingPolicy = new HttpLoggingPolicy(logOptions);
        if (credential == null) {
            LOGGER.verbose("Credentials are null, enabling anonymous access");
        }
        // The token service gets its own pipeline: same policies plus logging, but without the
        // credentials policy (which would recurse into the token service itself).
        ArrayList<HttpPipelinePolicy> credentialPolicies = clone(policies);
        credentialPolicies.add(loggingPolicy);
        if (audience == null) {
            audience = ContainerRegistryAudience.AZURE_RESOURCE_MANAGER_PUBLIC_CLOUD;
        }
        ContainerRegistryTokenService tokenService = new ContainerRegistryTokenService(
            credential,
            audience,
            endpoint,
            serviceVersion,
            new HttpPipelineBuilder()
                .policies(credentialPolicies.toArray(new HttpPipelinePolicy[0]))
                .httpClient(httpClient)
                .build(),
            JacksonAdapter.createDefaultSerializerAdapter());
        ContainerRegistryCredentialsPolicy credentialsPolicy = new ContainerRegistryCredentialsPolicy(tokenService);
        policies.add(credentialsPolicy);
        policies.add(loggingPolicy);
        HttpPipeline httpPipeline = new HttpPipelineBuilder()
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .httpClient(httpClient)
            .build();
        return httpPipeline;
    }

    // Shallow copy so the token-service pipeline and the main pipeline can diverge.
    @SuppressWarnings("unchecked")
    private static ArrayList<HttpPipelinePolicy> clone(ArrayList<HttpPipelinePolicy> policies) {
        return (ArrayList<HttpPipelinePolicy>) policies.clone();
    }

    /**
     * This method computes the digest for the buffer content.
     * Docker digest is a SHA256 hash of the docker image content and is deterministic based on the image build.
     * @param buffer The buffer containing the image bytes; read through a read-only duplicate so the
     *               caller's position is untouched.
     * @return SHA-256 digest for the given buffer, prefixed with "sha256:".
     */
    public static String computeDigest(ByteBuffer buffer) {
        ByteBuffer readOnlyBuffer = buffer.asReadOnlyBuffer();
        try {
            MessageDigest md = MessageDigest.getInstance("SHA-256");
            md.update(readOnlyBuffer);
            byte[] digest = md.digest();
            return "sha256:" + byteArrayToHex(digest);
        } catch (NoSuchAlgorithmException e) {
            // SHA-256 is mandated by the JCA spec; this branch is effectively unreachable.
            throw LOGGER.logExceptionAsError(new RuntimeException(e));
        }
    }

    /** Creates a fresh SHA-256 MessageDigest; SHA-256 is guaranteed by the JCA spec. */
    public static MessageDigest createSha256() {
        try {
            return MessageDigest.getInstance("SHA-256");
        } catch (NoSuchAlgorithmException e) {
            throw LOGGER.logExceptionAsError(new RuntimeException(e));
        }
    }

    private static final char[] HEX_ARRAY = "0123456789abcdef".toCharArray();

    /** Lowercase hex encoding of {@code bytes}, two chars per byte. */
    public static String byteArrayToHex(byte[] bytes) {
        char[] hexChars = new char[bytes.length * 2];
        for (int j = 0; j < bytes.length; j++) {
            int v = bytes[j] & 0xFF;
            hexChars[j * 2] = HEX_ARRAY[v >>> 4];
            hexChars[j * 2 + 1] = HEX_ARRAY[v & 0x0F];
        }
        return new String(hexChars);
    }

    /**
     * Delete operation should be idempotent.
     * And so should result in a success in case the service response is 404 (Not Found).
     * @param responseT The response object.
     * @param <T> The encapsulating value.
     * @return The transformed response object (404 is rewritten to 202 Accepted).
     */
    public static <T> Response<Void> deleteResponseToSuccess(Response<T> responseT) {
        if (responseT.getStatusCode() != HTTP_STATUS_CODE_NOT_FOUND) {
            // Pass-through: keep the original status code.
            return getAcceptedDeleteResponse(responseT, responseT.getStatusCode());
        }
        // 404 means the resource is already gone — treat the delete as successful.
        return getAcceptedDeleteResponse(responseT, HTTP_STATUS_CODE_ACCEPTED);
    }

    private static <T> Response<Void> getAcceptedDeleteResponse(Response<T> responseT, int statusCode) {
        return new SimpleResponse<Void>(
            responseT.getRequest(),
            statusCode,
            responseT.getHeaders(),
            null);
    }

    /**
     * This method converts the API response codes into well known exceptions.
     * Unwraps one level of RuntimeException cause to find an HttpResponseException.
     * @param exception The exception returned by the rest client.
     * @return The exception returned by the public methods.
     */
    public static Throwable mapException(Throwable exception) {
        HttpResponseException acrException = null;
        if (exception instanceof HttpResponseException) {
            acrException = ((HttpResponseException) exception);
        } else if (exception instanceof RuntimeException) {
            RuntimeException runtimeException = (RuntimeException) exception;
            Throwable throwable = runtimeException.getCause();
            if (throwable instanceof HttpResponseException) {
                acrException = (HttpResponseException) throwable;
            }
        }
        if (acrException == null) {
            // Not an HTTP failure we recognize — return unchanged.
            return exception;
        }
        return mapAcrErrorsException(acrException);
    }

    /** Maps well-known HTTP status codes to azure-core's typed exception hierarchy. */
    public static HttpResponseException mapAcrErrorsException(HttpResponseException acrException) {
        final HttpResponse errorHttpResponse = acrException.getResponse();
        final int statusCode = errorHttpResponse.getStatusCode();
        final String errorDetail = acrException.getMessage();
        switch (statusCode) {
            case 401:
                return new ClientAuthenticationException(errorDetail, acrException.getResponse(), acrException);
            case 404:
                return new ResourceNotFoundException(errorDetail, acrException.getResponse(), acrException);
            case 409:
                return new ResourceExistsException(errorDetail, acrException.getResponse(), acrException);
            case 412:
                return new ResourceModifiedException(errorDetail, acrException.getResponse(), acrException);
            default:
                return new HttpResponseException(errorDetail, acrException.getResponse(), acrException);
        }
    }

    /**
     * This method parses the response to get the continuation token used to make the next pagination call.
     * The continuation token is returned by the service in the form of a header and not as a nextLink field.
     * @param listResponse response that is parsed.
     * @param <T> the model type that is being operated on.
     * @return paged response with the correct continuation token.
     */
    public static <T> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<T> listResponse) {
        return getPagedResponseWithContinuationToken(listResponse, values -> values);
    }

    /**
     * This method parses the response to get the continuation token used to make the next pagination call.
     * The continuation token is returned by the service in the form of a header and not as a nextLink field.
     *
     * <p>
     * Per the Docker Registry HTTP API V2 spec, the Link header is an RFC 5988
     * compliant rel='next' with URL to next result set, if available.
     *
     * The URI reference can be obtained from link-value as follows:
     * Link = "Link" ":" #link-value
     * link-value = "&lt;" URI-Reference "&gt;" *(";" link-param)
     * (See RFC 5988, "Web Linking".)
     * </p>
     * @param listResponse response that is parsed.
     * @param mapperFunction the function that maps the rest api response into the public model exposed by the client.
     * @param <T> The model type returned by the rest client.
     * @param <R> The model type returned by the public client.
     * @return paged response with the correct continuation token.
     */
    public static <T, R> PagedResponse<T> getPagedResponseWithContinuationToken(PagedResponse<R> listResponse,
        Function<List<R>, List<T>> mapperFunction) {
        Objects.requireNonNull(mapperFunction);
        String continuationLink = getContinuationLink(listResponse.getHeaders());
        List<T> values = mapperFunction.apply(listResponse.getValue());
        return new PagedResponseBase<String, T>(
            listResponse.getRequest(),
            listResponse.getStatusCode(),
            listResponse.getHeaders(),
            values,
            continuationLink,
            null
        );
    }

    // Extracts the URI between '<' and ">;" from the 'Link' header, or null when absent.
    private static String getContinuationLink(HttpHeaders headers) {
        String continuationLinkHeader = headers.getValue(CONTINUATION_LINK_HEADER_NAME);
        if (!CoreUtils.isNullOrEmpty(continuationLinkHeader) && continuationLinkHeader.charAt(0) == '<') {
            int endIndex = continuationLinkHeader.indexOf(">;");
            if (endIndex < 2) {
                LOGGER.warning("unexpected 'Link' header value - '{}'", continuationLinkHeader);
            }
            // NOTE(review): when ">;" is missing, endIndex is -1 and the substring below throws
            // StringIndexOutOfBoundsException; consider returning null after the warning instead.
            return continuationLinkHeader.substring(1, endIndex);
        }
        return null;
    }

    /** Maps the generated manifest-attribute models to the public ArtifactManifestProperties model. */
    public static List<ArtifactManifestProperties> mapManifestsProperties(List<ManifestAttributesBase> baseArtifacts,
        String repositoryName, String registryLoginServer) {
        if (baseArtifacts == null) {
            return null;
        }
        return baseArtifacts.stream().map(value -> {
            ArtifactManifestProperties manifestProperties = new ArtifactManifestProperties()
                .setDeleteEnabled(value.isDeleteEnabled())
                .setListEnabled(value.isListEnabled())
                .setWriteEnabled(value.isWriteEnabled())
                .setReadEnabled(value.isReadEnabled());
            ArtifactManifestPropertiesHelper.setRepositoryName(manifestProperties, repositoryName);
            ArtifactManifestPropertiesHelper.setRegistryLoginServer(manifestProperties, registryLoginServer);
            ArtifactManifestPropertiesHelper.setDigest(manifestProperties, value.getDigest());
            ArtifactManifestPropertiesHelper.setRelatedArtifacts(manifestProperties, value.getRelatedArtifacts());
            ArtifactManifestPropertiesHelper.setCpuArchitecture(manifestProperties, value.getArchitecture());
            ArtifactManifestPropertiesHelper.setOperatingSystem(manifestProperties, value.getOperatingSystem());
            ArtifactManifestPropertiesHelper.setCreatedOn(manifestProperties, value.getCreatedOn());
            ArtifactManifestPropertiesHelper.setlastUpdatedOn(manifestProperties, value.getLastUpdatedOn());
            ArtifactManifestPropertiesHelper.setSizeInBytes(manifestProperties, value.getSize());
            ArtifactManifestPropertiesHelper.setTags(manifestProperties, value.getTags());
            return manifestProperties;
        }).collect(Collectors.toList());
    }

    /** Maps the generated tag-attribute models to the public ArtifactTagProperties model. */
    public static List<ArtifactTagProperties> getTagProperties(List<TagAttributesBase> baseValues,
        String repositoryName) {
        Objects.requireNonNull(baseValues);
        return baseValues.stream().map(value -> {
            ArtifactTagProperties tagProperties = new ArtifactTagProperties()
                .setDeleteEnabled(value.isDeleteEnabled())
                .setReadEnabled(value.isReadEnabled())
                .setListEnabled(value.isListEnabled())
                .setWriteEnabled(value.isWriteEnabled());
            ArtifactTagPropertiesHelper.setCreatedOn(tagProperties, value.getCreatedOn());
            ArtifactTagPropertiesHelper.setlastUpdatedOn(tagProperties, value.getLastUpdatedOn());
            ArtifactTagPropertiesHelper.setRepositoryName(tagProperties, repositoryName);
            ArtifactTagPropertiesHelper.setName(tagProperties, value.getName());
            ArtifactTagPropertiesHelper.setDigest(tagProperties, value.getDigest());
            return tagProperties;
        }).collect(Collectors.toList());
    }

    /**
     * Validates that the docker-content-digest response header matches the digest that was requested.
     * @param requestedDigest the digest the caller asked for.
     * @param headers response headers to check.
     * @throws ServiceResponseException when the header is absent or does not match.
     */
    public static void validateResponseHeaderDigest(String requestedDigest, HttpHeaders headers) {
        String responseHeaderDigest = headers.getValue(DOCKER_DIGEST_HEADER_NAME);
        if (!requestedDigest.equals(responseHeaderDigest)) {
            throw LOGGER.atError()
                .addKeyValue("requestedDigest", requestedDigest)
                .addKeyValue("responseDigest", responseHeaderDigest)
                .log(new ServiceResponseException("The digest in the response header does not match the expected digest."));
        }
    }

    /** Flags the context so rest-proxy executes this call on the synchronous code path. */
    public static Context enableSync(Context tracingContext) {
        return tracingContext.addData(HTTP_REST_PROXY_SYNC_PROXY_ENABLE, true);
    }

    /** Tags the context with the Container Registry tracing namespace. */
    public static Context getTracingContext(Context context) {
        return context.addData(AZ_TRACING_NAMESPACE_KEY, CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE);
    }

    /** Strips a leading '/' so the location header can be used as a relative next link. */
    public static String trimNextLink(String locationHeader) {
        if (locationHeader != null && locationHeader.startsWith("/")) {
            return locationHeader.substring(1);
        }
        return locationHeader;
    }

    /**
     * Parses the total blob size from a Content-Range header ("bytes start-end/total").
     * @throws ServiceResponseException when the header is missing or malformed.
     */
    public static long getBlobSize(HttpHeader contentRangeHeader) {
        if (contentRangeHeader != null) {
            int slashInd = contentRangeHeader.getValue().indexOf('/');
            if (slashInd > 0) {
                // Everything after '/' is the total size.
                return Long.parseLong(contentRangeHeader.getValue().substring(slashInd + 1));
            }
        }
        throw LOGGER.logExceptionAsError(new ServiceResponseException("Invalid content-range header in response -"
            + contentRangeHeader));
    }
}
If `BlobDownloadAsyncResult` supports it, constructing an instance with dummy variables is cleaner — then we won't need the try/catch.
/**
 * Creates a {@code BlobDownloadAsyncResult} via the accessor that
 * {@code BlobDownloadAsyncResult}'s static initializer registers on this class.
 * @param digest the blob digest.
 * @param content the blob content stream.
 * @return a new BlobDownloadAsyncResult wrapping the digest and content.
 */
public static BlobDownloadAsyncResult createBlobDownloadResult(String digest, Flux<ByteBuffer> content) {
    if (blobDownloadAccessor == null) {
        try {
            // Force BlobDownloadAsyncResult's static initializer to run; it calls
            // setBlobDownloadResultAccessor to register the accessor.
            Class.forName(BlobDownloadAsyncResult.class.getName(), true, BlobDownloadAsyncResultConstructorAccessor.class.getClassLoader());
        } catch (ClassNotFoundException e) {
            throw LOGGER.logExceptionAsError(new RuntimeException(e));
        }
    }
    if (blobDownloadAccessor == null) {
        // Fail fast with a clear message even when assertions (-ea) are disabled;
        // a bare assert would be silently skipped in production and surface as an NPE below.
        throw LOGGER.logExceptionAsError(new IllegalStateException(
            "BlobDownloadAsyncResult did not register its constructor accessor."));
    }
    return blobDownloadAccessor.create(digest, content);
}
Class.forName(BlobDownloadAsyncResult.class.getName(), true, BlobDownloadAsyncResultConstructorAccessor.class.getClassLoader());
/**
 * Creates a {@code BlobDownloadAsyncResult} via the accessor that
 * {@code BlobDownloadAsyncResult}'s static initializer registers on this class.
 * @param digest the blob digest.
 * @param content the blob content stream.
 * @return a new BlobDownloadAsyncResult wrapping the digest and content.
 */
public static BlobDownloadAsyncResult createBlobDownloadResult(String digest, Flux<ByteBuffer> content) {
    if (blobDownloadAccessor == null) {
        try {
            // Force BlobDownloadAsyncResult's static initializer to run; it calls
            // setBlobDownloadResultAccessor to register the accessor.
            Class.forName(BlobDownloadAsyncResult.class.getName(), true, BlobDownloadAsyncResultConstructorAccessor.class.getClassLoader());
        } catch (ClassNotFoundException e) {
            throw LOGGER.logExceptionAsError(new RuntimeException(e));
        }
    }
    // NOTE(review): assert is a no-op unless -ea is set; if the initializer ever fails to
    // register the accessor this becomes an NPE in production — consider an explicit check.
    assert blobDownloadAccessor != null;
    return blobDownloadAccessor.create(digest, content);
}
/**
 * Registry of accessors that allow this package to invoke non-public constructors
 * of public model types (here, BlobDownloadAsyncResult).
 */
class ConstructorAccessors {
    private static final ClientLogger LOGGER = new ClientLogger(ConstructorAccessors.class);
    // Written once by BlobDownloadAsyncResult's static initializer, read afterwards.
    // NOTE(review): field is not volatile; presumably initialization happens before any
    // cross-thread read via class loading — confirm, or mark volatile.
    private static BlobDownloadAsyncResultConstructorAccessor blobDownloadAccessor;

    /** Hook implemented next to BlobDownloadAsyncResult to expose its package-private constructor. */
    public interface BlobDownloadAsyncResultConstructorAccessor {
        BlobDownloadAsyncResult create(String digest, Flux<ByteBuffer> content);
    }

    /** Called from BlobDownloadAsyncResult's static initializer to publish the accessor. */
    public static void setBlobDownloadResultAccessor(final BlobDownloadAsyncResultConstructorAccessor accessor) {
        blobDownloadAccessor = accessor;
    }
}
/**
 * Registry of accessors that allow this package to invoke non-public constructors
 * of public model types (here, BlobDownloadAsyncResult).
 */
class ConstructorAccessors {
    private static final ClientLogger LOGGER = new ClientLogger(ConstructorAccessors.class);
    // Written once by BlobDownloadAsyncResult's static initializer, read afterwards.
    // NOTE(review): field is not volatile; presumably initialization happens before any
    // cross-thread read via class loading — confirm, or mark volatile.
    private static BlobDownloadAsyncResultConstructorAccessor blobDownloadAccessor;

    /** Hook implemented next to BlobDownloadAsyncResult to expose its package-private constructor. */
    public interface BlobDownloadAsyncResultConstructorAccessor {
        BlobDownloadAsyncResult create(String digest, Flux<ByteBuffer> content);
    }

    /** Called from BlobDownloadAsyncResult's static initializer to publish the accessor. */
    public static void setBlobDownloadResultAccessor(final BlobDownloadAsyncResultConstructorAccessor accessor) {
        blobDownloadAccessor = accessor;
    }
}
Does Container Registry support HTTP preconditions? If so, we should pass an `If-Match` header set to the first response's ETag to ensure that the download fails if the content changes between requests.
/**
 * Builds the ordered stream of chunk responses for a full blob download: validates the first
 * response's digest header, derives the total blob size from its Content-Range header, then
 * issues requests for the remaining CHUNK_SIZE-byte ranges and concatenates everything in order.
 */
private Flux<ContainerRegistryBlobsGetChunkResponse> getAllChunks(
    ContainerRegistryBlobsGetChunkResponse firstResponse, String digest, Context context) {
    // Reuse the shared helper instead of duplicating the digest comparison inline.
    validateResponseHeaderDigest(digest, firstResponse.getHeaders());

    long blobSize = getBlobSize(firstResponse.getHeaders().get(HttpHeaderName.CONTENT_RANGE));

    List<Mono<ContainerRegistryBlobsGetChunkResponse>> others = new ArrayList<>();
    others.add(Mono.just(firstResponse));
    // First chunk is already in hand; request the rest starting where it ended.
    for (long p = firstResponse.getValue().getLength(); p < blobSize; p += CHUNK_SIZE) {
        HttpRange range = new HttpRange(p, (long) CHUNK_SIZE);
        // TODO(review): if the service supports HTTP preconditions, send If-Match with the
        // first response's ETag so the download fails if the blob changes mid-transfer.
        others.add(blobsImpl.getChunkWithResponseAsync(repositoryName, digest, range.toString(), context));
    }

    return Flux.concat(others);
}
others.add(blobsImpl.getChunkWithResponseAsync(repositoryName, digest, range.toString(), context));
/**
 * Builds the ordered stream of chunk responses for a full blob download: validates the first
 * response's digest header, derives the total blob size from its Content-Range header, then
 * issues requests for the remaining CHUNK_SIZE-byte ranges and concatenates everything in order.
 */
private Flux<ContainerRegistryBlobsGetChunkResponse> getAllChunks(
    ContainerRegistryBlobsGetChunkResponse firstResponse, String digest, Context context) {
    // Throws ServiceResponseException if the docker-content-digest header does not match.
    validateResponseHeaderDigest(digest, firstResponse.getHeaders());
    long blobSize = getBlobSize(firstResponse.getHeaders().get(HttpHeaderName.CONTENT_RANGE));
    List<Mono<ContainerRegistryBlobsGetChunkResponse>> others = new ArrayList<>();
    others.add(Mono.just(firstResponse));
    // First chunk is already in hand; request the rest starting where it ended.
    for (long p = firstResponse.getValue().getLength(); p < blobSize; p += CHUNK_SIZE) {
        HttpRange range = new HttpRange(p, (long) CHUNK_SIZE);
        others.add(blobsImpl.getChunkWithResponseAsync(repositoryName, digest, range.toString(), context));
    }
    return Flux.concat(others);
}
class ContainerRegistryBlobAsyncClient { private final AzureContainerRegistryImpl registryImplClient; private final ContainerRegistryBlobsImpl blobsImpl; private final ContainerRegistriesImpl registriesImpl; private final String endpoint; private final String repositoryName; private static final ClientLogger LOGGER = new ClientLogger(ContainerRegistryBlobAsyncClient.class); ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { this.repositoryName = repositoryName; this.endpoint = endpoint; this.registryImplClient = new AzureContainerRegistryImplBuilder() .url(endpoint) .pipeline(httpPipeline) .apiVersion(version) .buildClient(); this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs(); this.registriesImpl = this.registryImplClient.getContainerRegistries(); } /** * This method returns the registry's repository on which operations are being performed. * * @return The name of the repository */ public String getRepositoryName() { return this.repositoryName; } /** * This method returns the complete registry endpoint. * * @return The registry endpoint including the authority. */ public String getEndpoint() { return this.endpoint; } /** * Upload the Oci manifest to the repository. * The upload is done as a single operation. * @see <a href="https: * @param manifest The OciManifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) { if (manifest == null) { return monoError(LOGGER, new NullPointerException("'manifest' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(new UploadManifestOptions(manifest), context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * @param options The options for the upload manifest operation. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(UploadManifestOptions options) { if (options == null) { return monoError(LOGGER, new NullPointerException("'options' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(options, context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * * @param options The options for the upload manifest operation. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(UploadManifestOptions options) { if (options == null) { return monoError(LOGGER, new NullPointerException("'options' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(options, context)); } private Mono<Response<UploadManifestResult>> uploadManifestWithResponse(UploadManifestOptions options, Context context) { if (options == null) { return monoError(LOGGER, new NullPointerException("'options' can't be null.")); } ByteBuffer data = options.getManifest().toByteBuffer(); String tagOrDigest = options.getTag() != null ? options.getTag() : UtilsImpl.computeDigest(data); return this.registriesImpl.createManifestWithResponseAsync( repositoryName, tagOrDigest, Flux.just(data), data.remaining(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context).map(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return res; }).onErrorMap(UtilsImpl::mapException); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadBlobResult> uploadBlob(BinaryData data) { if (data == null) { return monoError(LOGGER, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) { return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)); } private Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(LOGGER, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context) .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context)) .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), (Flux<ByteBuffer>) null, 0L, context)) .flatMap(completeUploadResponse -> { Response<UploadBlobResult> res = new 
ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(), completeUploadResponse.getStatusCode(), completeUploadResponse.getHeaders(), new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()), completeUploadResponse.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param options Options for the operation. * @return The manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadManifestResult> downloadManifest(DownloadManifestOptions options) { return this.downloadManifestWithResponse(options).flatMap(FluxUtil::toMono); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param options The options for the operation. * @return The response for the manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadManifestResult>> downloadManifestWithResponse(DownloadManifestOptions options) { return withContext(context -> this.downloadManifestWithResponse(options, context)); } private Mono<Response<DownloadManifestResult>> downloadManifestWithResponse(DownloadManifestOptions options, Context context) { if (options == null) { return monoError(LOGGER, new NullPointerException("'options' can't be null.")); } String tagOrDigest = options.getTag() != null ? options.getTag() : options.getDigest(); return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context) .flatMap(response -> { String digest = UtilsImpl.getDigestFromHeader(response.getHeaders()); ManifestWrapper wrapper = response.getValue(); if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { OciManifest ociManifest = new OciManifest() .setAnnotations(wrapper.getAnnotations()) .setConfig(wrapper.getConfig()) .setLayers(wrapper.getLayers()) .setSchemaVersion(wrapper.getSchemaVersion()); Response<DownloadManifestResult> res = new SimpleResponse<>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new DownloadManifestResult(digest, ociManifest, BinaryData.fromObject(ociManifest))); return Mono.just(res); } else { return monoError(LOGGER, new ServiceResponseException("The digest in the response does not match the expected digest.")); } }).onErrorMap(UtilsImpl::mapException); } /** * Download the blob\layer associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<BlobDownloadAsyncResult> downloadStream(String digest) { return withContext(context -> downloadBlobInternal(digest, context)); } private Mono<BlobDownloadAsyncResult> downloadBlobInternal(String digest, Context context) { if (digest == null) { return monoError(LOGGER, new NullPointerException("'digest' can't be null.")); } Flux<ByteBuffer> content = blobsImpl.getChunkWithResponseAsync(repositoryName, digest, new HttpRange(0, (long) CHUNK_SIZE).toString(), context) .flatMapMany(firstResponse -> getAllChunks(firstResponse, digest, context)) .flatMapSequential(chunk -> chunk.getValue().toFluxByteBuffer(), 1); return Mono.just(ConstructorAccessors.createBlobDownloadResult(digest, content)); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The completion signal. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteBlob(String digest) { return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The REST response for the completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteBlobWithResponse(String digest) { return withContext(context -> deleteBlobWithResponse(digest, context)); } private Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(LOGGER, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context) .flatMap(response -> Mono.just(UtilsImpl.deleteResponseToSuccess(response))) .onErrorResume( ex -> ex instanceof HttpResponseException && ((HttpResponseException) ex).getResponse().getStatusCode() == 404, ex -> { HttpResponse response = ((HttpResponseException) ex).getResponse(); return Mono.just(new SimpleResponse<Void>(response.getRequest(), 202, response.getHeaders(), null)); }) .onErrorMap(UtilsImpl::mapException); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteManifest(String digest) { return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The REST response for completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteManifestWithResponse(String digest) { return withContext(context -> deleteManifestWithResponse(digest, context)); } private Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) { return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context) .flatMap(response -> Mono.just(UtilsImpl.deleteResponseToSuccess(response))) .onErrorMap(UtilsImpl::mapException); } }
class ContainerRegistryBlobAsyncClient { private final AzureContainerRegistryImpl registryImplClient; private final ContainerRegistryBlobsImpl blobsImpl; private final ContainerRegistriesImpl registriesImpl; private final String endpoint; private final String repositoryName; private static final ClientLogger LOGGER = new ClientLogger(ContainerRegistryBlobAsyncClient.class); ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { this.repositoryName = repositoryName; this.endpoint = endpoint; this.registryImplClient = new AzureContainerRegistryImplBuilder() .url(endpoint) .pipeline(httpPipeline) .apiVersion(version) .buildClient(); this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs(); this.registriesImpl = this.registryImplClient.getContainerRegistries(); } /** * This method returns the registry's repository on which operations are being performed. * * @return The name of the repository */ public String getRepositoryName() { return this.repositoryName; } /** * This method returns the complete registry endpoint. * * @return The registry endpoint including the authority. */ public String getEndpoint() { return this.endpoint; } /** * Upload the Oci manifest to the repository. * The upload is done as a single operation. * @see <a href="https: * @param manifest The OciManifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) { if (manifest == null) { return monoError(LOGGER, new NullPointerException("'manifest' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(new UploadManifestOptions(manifest), context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * @param options The options for the upload manifest operation. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(UploadManifestOptions options) { if (options == null) { return monoError(LOGGER, new NullPointerException("'options' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(options, context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * * @param options The options for the upload manifest operation. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(UploadManifestOptions options) { if (options == null) { return monoError(LOGGER, new NullPointerException("'options' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(options, context)); } private Mono<Response<UploadManifestResult>> uploadManifestWithResponse(UploadManifestOptions options, Context context) { if (options == null) { return monoError(LOGGER, new NullPointerException("'options' can't be null.")); } ByteBuffer data = options.getManifest().toByteBuffer(); String tagOrDigest = options.getTag() != null ? options.getTag() : UtilsImpl.computeDigest(data); return this.registriesImpl.createManifestWithResponseAsync( repositoryName, tagOrDigest, Flux.just(data), data.remaining(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context).map(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return res; }).onErrorMap(UtilsImpl::mapException); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadBlobResult> uploadBlob(BinaryData data) { if (data == null) { return monoError(LOGGER, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) { return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)); } private Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(LOGGER, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context) .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context)) .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), (Flux<ByteBuffer>) null, 0L, context)) .flatMap(completeUploadResponse -> { Response<UploadBlobResult> res = new 
ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(), completeUploadResponse.getStatusCode(), completeUploadResponse.getHeaders(), new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()), completeUploadResponse.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param options Options for the operation. * @return The manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadManifestResult> downloadManifest(DownloadManifestOptions options) { return this.downloadManifestWithResponse(options).flatMap(FluxUtil::toMono); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param options The options for the operation. * @return The response for the manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadManifestResult>> downloadManifestWithResponse(DownloadManifestOptions options) { return withContext(context -> this.downloadManifestWithResponse(options, context)); } private Mono<Response<DownloadManifestResult>> downloadManifestWithResponse(DownloadManifestOptions options, Context context) { if (options == null) { return monoError(LOGGER, new NullPointerException("'options' can't be null.")); } String tagOrDigest = options.getTag() != null ? options.getTag() : options.getDigest(); return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context) .flatMap(response -> { String digest = response.getHeaders().getValue(DOCKER_DIGEST_HEADER_NAME); ManifestWrapper wrapper = response.getValue(); if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { OciManifest ociManifest = new OciManifest() .setAnnotations(wrapper.getAnnotations()) .setConfig(wrapper.getConfig()) .setLayers(wrapper.getLayers()) .setSchemaVersion(wrapper.getSchemaVersion()); Response<DownloadManifestResult> res = new SimpleResponse<>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new DownloadManifestResult(digest, ociManifest, BinaryData.fromObject(ociManifest))); return Mono.just(res); } else { return monoError(LOGGER, new ServiceResponseException("The digest in the response does not match the expected digest.")); } }).onErrorMap(UtilsImpl::mapException); } /** * Download the blob\layer associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<BlobDownloadAsyncResult> downloadStream(String digest) { return withContext(context -> downloadBlobInternal(digest, context)); } private Mono<BlobDownloadAsyncResult> downloadBlobInternal(String digest, Context context) { if (digest == null) { return monoError(LOGGER, new NullPointerException("'digest' can't be null.")); } Flux<ByteBuffer> content = blobsImpl.getChunkWithResponseAsync(repositoryName, digest, new HttpRange(0, (long) CHUNK_SIZE).toString(), context) .flatMapMany(firstResponse -> getAllChunks(firstResponse, digest, context)) .flatMapSequential(chunk -> chunk.getValue().toFluxByteBuffer(), 1); return Mono.just(ConstructorAccessors.createBlobDownloadResult(digest, content)); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The completion signal. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteBlob(String digest) { return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The REST response for the completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteBlobWithResponse(String digest) { return withContext(context -> deleteBlobWithResponse(digest, context)); } private Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(LOGGER, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context) .flatMap(response -> Mono.just(UtilsImpl.deleteResponseToSuccess(response))) .onErrorResume( ex -> ex instanceof HttpResponseException && ((HttpResponseException) ex).getResponse().getStatusCode() == 404, ex -> { HttpResponse response = ((HttpResponseException) ex).getResponse(); return Mono.just(new SimpleResponse<Void>(response.getRequest(), 202, response.getHeaders(), null)); }) .onErrorMap(UtilsImpl::mapException); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteManifest(String digest) { return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The REST response for completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteManifestWithResponse(String digest) { return withContext(context -> deleteManifestWithResponse(digest, context)); } private Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) { return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context) .flatMap(response -> Mono.just(UtilsImpl.deleteResponseToSuccess(response))) .onErrorMap(UtilsImpl::mapException); } }
I want to keep `BlobDownloadAsyncResult` constructor package-private
public static BlobDownloadAsyncResult createBlobDownloadResult(String digest, Flux<ByteBuffer> content) { if (blobDownloadAccessor == null) { try { Class.forName(BlobDownloadAsyncResult.class.getName(), true, BlobDownloadAsyncResultConstructorAccessor.class.getClassLoader()); } catch (ClassNotFoundException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } assert blobDownloadAccessor != null; return blobDownloadAccessor.create(digest, content); }
Class.forName(BlobDownloadAsyncResult.class.getName(), true, BlobDownloadAsyncResultConstructorAccessor.class.getClassLoader());
public static BlobDownloadAsyncResult createBlobDownloadResult(String digest, Flux<ByteBuffer> content) { if (blobDownloadAccessor == null) { try { Class.forName(BlobDownloadAsyncResult.class.getName(), true, BlobDownloadAsyncResultConstructorAccessor.class.getClassLoader()); } catch (ClassNotFoundException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } assert blobDownloadAccessor != null; return blobDownloadAccessor.create(digest, content); }
class ConstructorAccessors { private static final ClientLogger LOGGER = new ClientLogger(ConstructorAccessors.class); private static BlobDownloadAsyncResultConstructorAccessor blobDownloadAccessor; public interface BlobDownloadAsyncResultConstructorAccessor { BlobDownloadAsyncResult create(String digest, Flux<ByteBuffer> content); } public static void setBlobDownloadResultAccessor(final BlobDownloadAsyncResultConstructorAccessor accessor) { blobDownloadAccessor = accessor; } }
class ConstructorAccessors { private static final ClientLogger LOGGER = new ClientLogger(ConstructorAccessors.class); private static BlobDownloadAsyncResultConstructorAccessor blobDownloadAccessor; public interface BlobDownloadAsyncResultConstructorAccessor { BlobDownloadAsyncResult create(String digest, Flux<ByteBuffer> content); } public static void setBlobDownloadResultAccessor(final BlobDownloadAsyncResultConstructorAccessor accessor) { blobDownloadAccessor = accessor; } }
the content is immutable - the digest is sha256 of the content and it's used to get the content. We also validate it at the end.
private Flux<ContainerRegistryBlobsGetChunkResponse> getAllChunks(ContainerRegistryBlobsGetChunkResponse firstResponse, String digest, Context context) { String responseHeaderDigest = firstResponse.getHeaders().getValue(DOCKER_DIGEST_HEADER_NAME); if (!digest.equals(responseHeaderDigest)) { return Flux.error(LOGGER.atError() .addKeyValue("requestedDigest", digest) .addKeyValue("responseDigest", responseHeaderDigest) .log(new ServiceResponseException("The digest in the response header does not match the expected digest."))); } long blobSize = getBlobSize(firstResponse.getHeaders().get(HttpHeaderName.CONTENT_RANGE)); List<Mono<ContainerRegistryBlobsGetChunkResponse>> others = new ArrayList<>(); others.add(Mono.just(firstResponse)); for (long p = firstResponse.getValue().getLength(); p < blobSize; p += CHUNK_SIZE) { HttpRange range = new HttpRange(p, (long) CHUNK_SIZE); others.add(blobsImpl.getChunkWithResponseAsync(repositoryName, digest, range.toString(), context)); } return Flux.concat(others); }
others.add(blobsImpl.getChunkWithResponseAsync(repositoryName, digest, range.toString(), context));
private Flux<ContainerRegistryBlobsGetChunkResponse> getAllChunks(ContainerRegistryBlobsGetChunkResponse firstResponse, String digest, Context context) { validateResponseHeaderDigest(digest, firstResponse.getHeaders()); long blobSize = getBlobSize(firstResponse.getHeaders().get(HttpHeaderName.CONTENT_RANGE)); List<Mono<ContainerRegistryBlobsGetChunkResponse>> others = new ArrayList<>(); others.add(Mono.just(firstResponse)); for (long p = firstResponse.getValue().getLength(); p < blobSize; p += CHUNK_SIZE) { HttpRange range = new HttpRange(p, (long) CHUNK_SIZE); others.add(blobsImpl.getChunkWithResponseAsync(repositoryName, digest, range.toString(), context)); } return Flux.concat(others); }
class ContainerRegistryBlobAsyncClient { private final AzureContainerRegistryImpl registryImplClient; private final ContainerRegistryBlobsImpl blobsImpl; private final ContainerRegistriesImpl registriesImpl; private final String endpoint; private final String repositoryName; private static final ClientLogger LOGGER = new ClientLogger(ContainerRegistryBlobAsyncClient.class); ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { this.repositoryName = repositoryName; this.endpoint = endpoint; this.registryImplClient = new AzureContainerRegistryImplBuilder() .url(endpoint) .pipeline(httpPipeline) .apiVersion(version) .buildClient(); this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs(); this.registriesImpl = this.registryImplClient.getContainerRegistries(); } /** * This method returns the registry's repository on which operations are being performed. * * @return The name of the repository */ public String getRepositoryName() { return this.repositoryName; } /** * This method returns the complete registry endpoint. * * @return The registry endpoint including the authority. */ public String getEndpoint() { return this.endpoint; } /** * Upload the Oci manifest to the repository. * The upload is done as a single operation. * @see <a href="https: * @param manifest The OciManifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) { if (manifest == null) { return monoError(LOGGER, new NullPointerException("'manifest' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(new UploadManifestOptions(manifest), context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * @param options The options for the upload manifest operation. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(UploadManifestOptions options) { if (options == null) { return monoError(LOGGER, new NullPointerException("'options' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(options, context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * * @param options The options for the upload manifest operation. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(UploadManifestOptions options) { if (options == null) { return monoError(LOGGER, new NullPointerException("'options' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(options, context)); } private Mono<Response<UploadManifestResult>> uploadManifestWithResponse(UploadManifestOptions options, Context context) { if (options == null) { return monoError(LOGGER, new NullPointerException("'options' can't be null.")); } ByteBuffer data = options.getManifest().toByteBuffer(); String tagOrDigest = options.getTag() != null ? options.getTag() : UtilsImpl.computeDigest(data); return this.registriesImpl.createManifestWithResponseAsync( repositoryName, tagOrDigest, Flux.just(data), data.remaining(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context).map(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return res; }).onErrorMap(UtilsImpl::mapException); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadBlobResult> uploadBlob(BinaryData data) { if (data == null) { return monoError(LOGGER, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) { return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)); } private Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(LOGGER, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context) .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context)) .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), (Flux<ByteBuffer>) null, 0L, context)) .flatMap(completeUploadResponse -> { Response<UploadBlobResult> res = new 
ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(), completeUploadResponse.getStatusCode(), completeUploadResponse.getHeaders(), new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()), completeUploadResponse.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param options Options for the operation. * @return The manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadManifestResult> downloadManifest(DownloadManifestOptions options) { return this.downloadManifestWithResponse(options).flatMap(FluxUtil::toMono); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param options The options for the operation. * @return The response for the manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadManifestResult>> downloadManifestWithResponse(DownloadManifestOptions options) { return withContext(context -> this.downloadManifestWithResponse(options, context)); } private Mono<Response<DownloadManifestResult>> downloadManifestWithResponse(DownloadManifestOptions options, Context context) { if (options == null) { return monoError(LOGGER, new NullPointerException("'options' can't be null.")); } String tagOrDigest = options.getTag() != null ? options.getTag() : options.getDigest(); return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context) .flatMap(response -> { String digest = UtilsImpl.getDigestFromHeader(response.getHeaders()); ManifestWrapper wrapper = response.getValue(); if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { OciManifest ociManifest = new OciManifest() .setAnnotations(wrapper.getAnnotations()) .setConfig(wrapper.getConfig()) .setLayers(wrapper.getLayers()) .setSchemaVersion(wrapper.getSchemaVersion()); Response<DownloadManifestResult> res = new SimpleResponse<>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new DownloadManifestResult(digest, ociManifest, BinaryData.fromObject(ociManifest))); return Mono.just(res); } else { return monoError(LOGGER, new ServiceResponseException("The digest in the response does not match the expected digest.")); } }).onErrorMap(UtilsImpl::mapException); } /** * Download the blob\layer associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<BlobDownloadAsyncResult> downloadStream(String digest) { return withContext(context -> downloadBlobInternal(digest, context)); } private Mono<BlobDownloadAsyncResult> downloadBlobInternal(String digest, Context context) { if (digest == null) { return monoError(LOGGER, new NullPointerException("'digest' can't be null.")); } Flux<ByteBuffer> content = blobsImpl.getChunkWithResponseAsync(repositoryName, digest, new HttpRange(0, (long) CHUNK_SIZE).toString(), context) .flatMapMany(firstResponse -> getAllChunks(firstResponse, digest, context)) .flatMapSequential(chunk -> chunk.getValue().toFluxByteBuffer(), 1); return Mono.just(ConstructorAccessors.createBlobDownloadResult(digest, content)); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The completion signal. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteBlob(String digest) { return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The REST response for the completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteBlobWithResponse(String digest) { return withContext(context -> deleteBlobWithResponse(digest, context)); } private Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(LOGGER, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context) .flatMap(response -> Mono.just(UtilsImpl.deleteResponseToSuccess(response))) .onErrorResume( ex -> ex instanceof HttpResponseException && ((HttpResponseException) ex).getResponse().getStatusCode() == 404, ex -> { HttpResponse response = ((HttpResponseException) ex).getResponse(); return Mono.just(new SimpleResponse<Void>(response.getRequest(), 202, response.getHeaders(), null)); }) .onErrorMap(UtilsImpl::mapException); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteManifest(String digest) { return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The REST response for completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteManifestWithResponse(String digest) { return withContext(context -> deleteManifestWithResponse(digest, context)); } private Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) { return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context) .flatMap(response -> Mono.just(UtilsImpl.deleteResponseToSuccess(response))) .onErrorMap(UtilsImpl::mapException); } }
class ContainerRegistryBlobAsyncClient { private final AzureContainerRegistryImpl registryImplClient; private final ContainerRegistryBlobsImpl blobsImpl; private final ContainerRegistriesImpl registriesImpl; private final String endpoint; private final String repositoryName; private static final ClientLogger LOGGER = new ClientLogger(ContainerRegistryBlobAsyncClient.class); ContainerRegistryBlobAsyncClient(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { this.repositoryName = repositoryName; this.endpoint = endpoint; this.registryImplClient = new AzureContainerRegistryImplBuilder() .url(endpoint) .pipeline(httpPipeline) .apiVersion(version) .buildClient(); this.blobsImpl = this.registryImplClient.getContainerRegistryBlobs(); this.registriesImpl = this.registryImplClient.getContainerRegistries(); } /** * This method returns the registry's repository on which operations are being performed. * * @return The name of the repository */ public String getRepositoryName() { return this.repositoryName; } /** * This method returns the complete registry endpoint. * * @return The registry endpoint including the authority. */ public String getEndpoint() { return this.endpoint; } /** * Upload the Oci manifest to the repository. * The upload is done as a single operation. * @see <a href="https: * @param manifest The OciManifest that needs to be uploaded. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code manifest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(OciManifest manifest) { if (manifest == null) { return monoError(LOGGER, new NullPointerException("'manifest' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(new UploadManifestOptions(manifest), context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * @param options The options for the upload manifest operation. * @return operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadManifestResult> uploadManifest(UploadManifestOptions options) { if (options == null) { return monoError(LOGGER, new NullPointerException("'options' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(options, context)).flatMap(FluxUtil::toMono); } /** * Uploads a manifest to the repository. * The client currently only supports uploading OciManifests to the repository. * And this operation makes the assumption that the data provided is a valid OCI manifest. * <p> * Also, the data is read into memory and then an upload operation is performed as a single operation. * @see <a href="https: * * @param options The options for the upload manifest operation. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadManifestResult>> uploadManifestWithResponse(UploadManifestOptions options) { if (options == null) { return monoError(LOGGER, new NullPointerException("'options' can't be null.")); } return withContext(context -> this.uploadManifestWithResponse(options, context)); } private Mono<Response<UploadManifestResult>> uploadManifestWithResponse(UploadManifestOptions options, Context context) { if (options == null) { return monoError(LOGGER, new NullPointerException("'options' can't be null.")); } ByteBuffer data = options.getManifest().toByteBuffer(); String tagOrDigest = options.getTag() != null ? options.getTag() : UtilsImpl.computeDigest(data); return this.registriesImpl.createManifestWithResponseAsync( repositoryName, tagOrDigest, Flux.just(data), data.remaining(), UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context).map(response -> { Response<UploadManifestResult> res = new ResponseBase<ContainerRegistriesCreateManifestHeaders, UploadManifestResult>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new UploadManifestResult(response.getDeserializedHeaders().getDockerContentDigest()), response.getDeserializedHeaders()); return res; }).onErrorMap(UtilsImpl::mapException); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UploadBlobResult> uploadBlob(BinaryData data) { if (data == null) { return monoError(LOGGER, new NullPointerException("'data' can't be null.")); } return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)).flatMap(FluxUtil::toMono); } /** * Uploads a blob to the repository. * The client currently uploads the entire blob\layer as a single unit. * <p> * The blob is read into memory and then an upload operation is performed as a single operation. * We currently do not support breaking the layer into multiple chunks and uploading them one at a time * * @param data The blob\image content that needs to be uploaded. * @return The rest response containing the operation result. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code data} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UploadBlobResult>> uploadBlobWithResponse(BinaryData data) { return withContext(context -> this.uploadBlobWithResponse(data.toByteBuffer(), context)); } private Mono<Response<UploadBlobResult>> uploadBlobWithResponse(ByteBuffer data, Context context) { if (data == null) { return monoError(LOGGER, new NullPointerException("'data' can't be null.")); } String digest = UtilsImpl.computeDigest(data); return this.blobsImpl.startUploadWithResponseAsync(repositoryName, context) .flatMap(startUploadResponse -> this.blobsImpl.uploadChunkWithResponseAsync(trimNextLink(startUploadResponse.getDeserializedHeaders().getLocation()), Flux.just(data), data.remaining(), context)) .flatMap(uploadChunkResponse -> this.blobsImpl.completeUploadWithResponseAsync(digest, trimNextLink(uploadChunkResponse.getDeserializedHeaders().getLocation()), (Flux<ByteBuffer>) null, 0L, context)) .flatMap(completeUploadResponse -> { Response<UploadBlobResult> res = new 
ResponseBase<ContainerRegistryBlobsCompleteUploadHeaders, UploadBlobResult>(completeUploadResponse.getRequest(), completeUploadResponse.getStatusCode(), completeUploadResponse.getHeaders(), new UploadBlobResult(completeUploadResponse.getDeserializedHeaders().getDockerContentDigest()), completeUploadResponse.getDeserializedHeaders()); return Mono.just(res); }).onErrorMap(UtilsImpl::mapException); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param options Options for the operation. * @return The manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DownloadManifestResult> downloadManifest(DownloadManifestOptions options) { return this.downloadManifestWithResponse(options).flatMap(FluxUtil::toMono); } /** * Download the manifest associated with the given tag or digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param options The options for the operation. * @return The response for the manifest associated with the given tag or digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code tagOrDigest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DownloadManifestResult>> downloadManifestWithResponse(DownloadManifestOptions options) { return withContext(context -> this.downloadManifestWithResponse(options, context)); } private Mono<Response<DownloadManifestResult>> downloadManifestWithResponse(DownloadManifestOptions options, Context context) { if (options == null) { return monoError(LOGGER, new NullPointerException("'options' can't be null.")); } String tagOrDigest = options.getTag() != null ? options.getTag() : options.getDigest(); return this.registriesImpl.getManifestWithResponseAsync(repositoryName, tagOrDigest, UtilsImpl.OCI_MANIFEST_MEDIA_TYPE, context) .flatMap(response -> { String digest = response.getHeaders().getValue(DOCKER_DIGEST_HEADER_NAME); ManifestWrapper wrapper = response.getValue(); if (Objects.equals(digest, tagOrDigest) || Objects.equals(response.getValue().getTag(), tagOrDigest)) { OciManifest ociManifest = new OciManifest() .setAnnotations(wrapper.getAnnotations()) .setConfig(wrapper.getConfig()) .setLayers(wrapper.getLayers()) .setSchemaVersion(wrapper.getSchemaVersion()); Response<DownloadManifestResult> res = new SimpleResponse<>( response.getRequest(), response.getStatusCode(), response.getHeaders(), new DownloadManifestResult(digest, ociManifest, BinaryData.fromObject(ociManifest))); return Mono.just(res); } else { return monoError(LOGGER, new ServiceResponseException("The digest in the response does not match the expected digest.")); } }).onErrorMap(UtilsImpl::mapException); } /** * Download the blob\layer associated with the given digest. * * @param digest The digest for the given image layer. * @return The image associated with the given digest. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<BlobDownloadAsyncResult> downloadStream(String digest) { return withContext(context -> downloadBlobInternal(digest, context)); } private Mono<BlobDownloadAsyncResult> downloadBlobInternal(String digest, Context context) { if (digest == null) { return monoError(LOGGER, new NullPointerException("'digest' can't be null.")); } Flux<ByteBuffer> content = blobsImpl.getChunkWithResponseAsync(repositoryName, digest, new HttpRange(0, (long) CHUNK_SIZE).toString(), context) .flatMapMany(firstResponse -> getAllChunks(firstResponse, digest, context)) .flatMapSequential(chunk -> chunk.getValue().toFluxByteBuffer(), 1); return Mono.just(ConstructorAccessors.createBlobDownloadResult(digest, content)); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The completion signal. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteBlob(String digest) { return this.deleteBlobWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the image associated with the given digest * * @param digest The digest for the given image layer. * @return The REST response for the completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteBlobWithResponse(String digest) { return withContext(context -> deleteBlobWithResponse(digest, context)); } private Mono<Response<Void>> deleteBlobWithResponse(String digest, Context context) { if (digest == null) { return monoError(LOGGER, new NullPointerException("'digest' can't be null.")); } return this.blobsImpl.deleteBlobWithResponseAsync(repositoryName, digest, context) .flatMap(response -> Mono.just(UtilsImpl.deleteResponseToSuccess(response))) .onErrorResume( ex -> ex instanceof HttpResponseException && ((HttpResponseException) ex).getResponse().getStatusCode() == 404, ex -> { HttpResponse response = ((HttpResponseException) ex).getResponse(); return Mono.just(new SimpleResponse<Void>(response.getRequest(), 202, response.getHeaders(), null)); }) .onErrorMap(UtilsImpl::mapException); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> deleteManifest(String digest) { return this.deleteManifestWithResponse(digest).flatMap(FluxUtil::toMono); } /** * Delete the manifest associated with the given digest. * We currently only support downloading OCI manifests. * * @see <a href="https: * * @param digest The digest of the manifest. * @return The REST response for completion. * @throws ClientAuthenticationException thrown if the client's credentials do not have access to modify the namespace. * @throws NullPointerException thrown if the {@code digest} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteManifestWithResponse(String digest) { return withContext(context -> deleteManifestWithResponse(digest, context)); } private Mono<Response<Void>> deleteManifestWithResponse(String digest, Context context) { return this.registriesImpl.deleteManifestWithResponseAsync(repositoryName, digest, context) .flatMap(response -> Mono.just(UtilsImpl.deleteResponseToSuccess(response))) .onErrorMap(UtilsImpl::mapException); } }
Should we check whether `managedIdentityClientId` is null or empty before doing any of the checks as it's the only constructor parameter not retrieved in this method.
private WorkloadIdentityCredential getWorkloadIdentityCredentialIfAvailable() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE); String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); if (!(CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || CoreUtils.isNullOrEmpty(managedIdentityClientId) || CoreUtils.isNullOrEmpty(azureAuthorityHost))) { return new WorkloadIdentityCredential(managedIdentityClientId, tenantId, federatedTokenFilePath, azureAuthorityHost, identityClientOptions.clone()); } return null; }
|| CoreUtils.isNullOrEmpty(managedIdentityClientId)
private WorkloadIdentityCredential getWorkloadIdentityCredentialIfAvailable() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE); String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); String clientId = CoreUtils.isNullOrEmpty(workloadIdentityClientId) ? managedIdentityClientId : workloadIdentityClientId; if (!(CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || CoreUtils.isNullOrEmpty(clientId) || CoreUtils.isNullOrEmpty(azureAuthorityHost))) { return new WorkloadIdentityCredential(tenantId, clientId, federatedTokenFilePath, identityClientOptions.setAuthorityHost(azureAuthorityHost).clone()); } return null; }
class DefaultAzureCredentialBuilder extends CredentialBuilderBase<DefaultAzureCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(DefaultAzureCredentialBuilder.class); private String tenantId; private String managedIdentityClientId; private String managedIdentityResourceId; private List<String> additionallyAllowedTenants = IdentityUtil .getAdditionalTenantsFromEnvironment(Configuration.getGlobalConfiguration().clone()); /** * Creates an instance of a DefaultAzureCredentialBuilder. */ public DefaultAzureCredentialBuilder() { Configuration configuration = Configuration.getGlobalConfiguration().clone(); tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); managedIdentityClientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); this.identityClientOptions.setIdentityLogOptionsImpl(new IdentityLogOptionsImpl(true)); } /** * Sets the tenant id of the user to authenticate through the {@link DefaultAzureCredential}. If unset, the value * in the AZURE_TENANT_ID environment variable will be used. If neither is set, the default is null * and will authenticate users to their default tenant. * * @param tenantId the tenant ID to set. * @return An updated instance of this builder with the tenant id set as specified. */ public DefaultAzureCredentialBuilder tenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public DefaultAzureCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the KeePass database path to read the cached credentials of Azure toolkit for IntelliJ plugin. * The {@code databasePath} is required on Windows platform. 
For macOS and Linux platform native key chain / * key ring will be accessed respectively to retrieve the cached credentials. * * <p>This path can be located in the IntelliJ IDE. * Windows: File -&gt; Settings -&gt; Appearance &amp; Behavior -&gt; System Settings -&gt; Passwords. </p> * * @param databasePath the path to the KeePass database. * @throws IllegalArgumentException if {@code databasePath} is either not specified or is empty. * @return An updated instance of this builder with the KeePass database path set as specified. */ public DefaultAzureCredentialBuilder intelliJKeePassDatabasePath(String databasePath) { if (CoreUtils.isNullOrEmpty(databasePath)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The KeePass database path is either empty or not configured." + " Please configure it on the builder.")); } this.identityClientOptions.setIntelliJKeePassDatabasePath(databasePath); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityClientId and managedIdentityResourceId can be specified. * * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. 
If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityResourceId and managedIdentityClientId can be specified. * * @param resourceId the resource ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; } /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. */ public DefaultAzureCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(String... 
additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(Arrays.asList(additionallyAllowedTenants)); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(additionallyAllowedTenants); return this; } /** * Creates new {@link DefaultAzureCredential} with the configured options set. * * @return a {@link DefaultAzureCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. */ public DefaultAzureCredential build() { if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityResourceId and managedIdentityClientId can be specified.")); } if (!CoreUtils.isNullOrEmpty(additionallyAllowedTenants)) { identityClientOptions.setAdditionallyAllowedTenants(additionallyAllowedTenants); } return new DefaultAzureCredential(getCredentialsChain()); } private ArrayList<TokenCredential> getCredentialsChain() { WorkloadIdentityCredential workloadIdentityCredential = getWorkloadIdentityCredentialIfAvailable(); ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(workloadIdentityCredential != null ? 
8 : 7); output.add(new EnvironmentCredential(identityClientOptions.clone())); if (workloadIdentityCredential != null) { output.add(workloadIdentityCredential); } output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions.clone())); output.add(new AzureDeveloperCliCredential(tenantId, identityClientOptions.clone())); output.add(new SharedTokenCacheCredential(null, IdentityConstants.DEVELOPER_SINGLE_SIGN_ON_ID, tenantId, identityClientOptions.clone())); output.add(new IntelliJCredential(tenantId, identityClientOptions.clone())); output.add(new AzureCliCredential(tenantId, identityClientOptions.clone())); output.add(new AzurePowerShellCredential(tenantId, identityClientOptions.clone())); return output; } }
class DefaultAzureCredentialBuilder extends CredentialBuilderBase<DefaultAzureCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(DefaultAzureCredentialBuilder.class); private String tenantId; private String managedIdentityClientId; private String workloadIdentityClientId; private String managedIdentityResourceId; private List<String> additionallyAllowedTenants = IdentityUtil .getAdditionalTenantsFromEnvironment(Configuration.getGlobalConfiguration().clone()); /** * Creates an instance of a DefaultAzureCredentialBuilder. */ public DefaultAzureCredentialBuilder() { this.identityClientOptions.setIdentityLogOptionsImpl(new IdentityLogOptionsImpl(true)); } /** * Sets the tenant id of the user to authenticate through the {@link DefaultAzureCredential}. If unset, the value * in the AZURE_TENANT_ID environment variable will be used. If neither is set, the default is null * and will authenticate users to their default tenant. * * @param tenantId the tenant ID to set. * @return An updated instance of this builder with the tenant id set as specified. */ public DefaultAzureCredentialBuilder tenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public DefaultAzureCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the KeePass database path to read the cached credentials of Azure toolkit for IntelliJ plugin. * The {@code databasePath} is required on Windows platform. For macOS and Linux platform native key chain / * key ring will be accessed respectively to retrieve the cached credentials. * * <p>This path can be located in the IntelliJ IDE. 
* Windows: File -&gt; Settings -&gt; Appearance &amp; Behavior -&gt; System Settings -&gt; Passwords. </p> * * @param databasePath the path to the KeePass database. * @throws IllegalArgumentException if {@code databasePath} is either not specified or is empty. * @return An updated instance of this builder with the KeePass database path set as specified. */ public DefaultAzureCredentialBuilder intelliJKeePassDatabasePath(String databasePath) { if (CoreUtils.isNullOrEmpty(databasePath)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The KeePass database path is either empty or not configured." + " Please configure it on the builder.")); } this.identityClientOptions.setIntelliJKeePassDatabasePath(databasePath); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityClientId and managedIdentityResourceId can be specified. * * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the client ID of Azure AD app to be used for AKS workload identity authentication. * if unset, {@link DefaultAzureCredentialBuilder * If both values are unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If none are set, the default value is null and Workload Identity authentication will not be attempted. 
* * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder workloadIdentityClientId(String clientId) { this.workloadIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityResourceId and managedIdentityClientId can be specified. * * @param resourceId the resource ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; } /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. 
*/ public DefaultAzureCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(String... additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(Arrays.asList(additionallyAllowedTenants)); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(additionallyAllowedTenants); return this; } /** * Creates new {@link DefaultAzureCredential} with the configured options set. * * @return a {@link DefaultAzureCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. 
*/ public DefaultAzureCredential build() { loadFallbackValuesFromEnvironment(); if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityResourceId and managedIdentityClientId can be specified.")); } if (!CoreUtils.isNullOrEmpty(additionallyAllowedTenants)) { identityClientOptions.setAdditionallyAllowedTenants(additionallyAllowedTenants); } return new DefaultAzureCredential(getCredentialsChain()); } private void loadFallbackValuesFromEnvironment() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); tenantId = CoreUtils.isNullOrEmpty(tenantId) ? configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID) : tenantId; managedIdentityClientId = CoreUtils.isNullOrEmpty(managedIdentityClientId) ? configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID) : managedIdentityClientId; } private ArrayList<TokenCredential> getCredentialsChain() { WorkloadIdentityCredential workloadIdentityCredential = getWorkloadIdentityCredentialIfAvailable(); ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(workloadIdentityCredential != null ? 
8 : 7); output.add(new EnvironmentCredential(identityClientOptions.clone())); if (workloadIdentityCredential != null) { output.add(workloadIdentityCredential); } output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions.clone())); output.add(new AzureDeveloperCliCredential(tenantId, identityClientOptions.clone())); output.add(new SharedTokenCacheCredential(null, IdentityConstants.DEVELOPER_SINGLE_SIGN_ON_ID, tenantId, identityClientOptions.clone())); output.add(new IntelliJCredential(tenantId, identityClientOptions.clone())); output.add(new AzureCliCredential(tenantId, identityClientOptions.clone())); output.add(new AzurePowerShellCredential(tenantId, identityClientOptions.clone())); return output; } }
Broader question, do other credentials have similar logic where they check if the credential is available? If so, could this be exposed internally in a way where chained credentials or default credentials can inspect it and exclude the credential instead of having to use Reactor's error and resume pattern?
public Mono<AccessToken> getToken(TokenRequestContext request) { if (credentialAvailable) { return identityClient.authenticateWithExchangeToken(request); } else { return Mono.error(LOGGER.logExceptionAsError(new CredentialUnavailableException("WorkloadIdentityCredential authentication unavailable." + " Environment variables are not fully configured."))); } }
if (credentialAvailable) {
public Mono<AccessToken> getToken(TokenRequestContext request) { return identityClient.authenticateWithExchangeToken(request); }
class WorkloadIdentityCredential implements TokenCredential { private static final ClientLogger LOGGER = new ClientLogger(WorkloadIdentityCredential.class); private final IdentityClient identityClient; private final boolean credentialAvailable; /** * WorkloadIdentityCredential supports Azure workload identity on Kubernetes. * * @param tenantId ID of the application's Azure Active Directory tenant. Also called its directory ID. * @param clientId The client ID of an Azure AD app registration. * @param federatedTokenFilePath The path to a file containing a Kubernetes service account token that authenticates the identity. * @param authorityHost The authority host to authenticate against. * @param identityClientOptions The identity client options to use for authentication. */ WorkloadIdentityCredential(String clientId, String tenantId, String federatedTokenFilePath, String authorityHost, IdentityClientOptions identityClientOptions) { identityClient = new IdentityClientBuilder() .clientAssertionPath(federatedTokenFilePath) .clientId(clientId) .tenantId(tenantId) .identityClientOptions(identityClientOptions.setAuthorityHost(authorityHost)) .build(); ValidationUtil.validateTenantIdCharacterRange(tenantId, LOGGER); credentialAvailable = !(CoreUtils.isNullOrEmpty(clientId) || CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || CoreUtils.isNullOrEmpty(authorityHost)); } @Override }
class WorkloadIdentityCredential implements TokenCredential { private static final ClientLogger LOGGER = new ClientLogger(WorkloadIdentityCredential.class); private final IdentityClient identityClient; /** * WorkloadIdentityCredential supports Azure workload identity on Kubernetes. * * @param tenantId ID of the application's Azure Active Directory tenant. Also called its directory ID. * @param clientId The client ID of an Azure AD app registration. * @param federatedTokenFilePath The path to a file containing a Kubernetes service account token that authenticates the identity. * @param identityClientOptions The identity client options to use for authentication. */ WorkloadIdentityCredential(String tenantId, String clientId, String federatedTokenFilePath, IdentityClientOptions identityClientOptions) { ValidationUtil.validateTenantIdCharacterRange(tenantId, LOGGER); identityClient = new IdentityClientBuilder() .clientAssertionPath(federatedTokenFilePath) .clientId(clientId) .tenantId(tenantId) .identityClientOptions(identityClientOptions) .build(); } @Override String getClientId() { return this.identityClient.getClientId(); } }
Could use `FluxUtils.monoError` in this case.
public Mono<AccessToken> getToken(TokenRequestContext request) { if (credentialAvailable) { return identityClient.authenticateWithExchangeToken(request); } else { return Mono.error(LOGGER.logExceptionAsError(new CredentialUnavailableException("WorkloadIdentityCredential authentication unavailable." + " Environment variables are not fully configured."))); } }
return Mono.error(LOGGER.logExceptionAsError(new CredentialUnavailableException("WorkloadIdentityCredential authentication unavailable."
public Mono<AccessToken> getToken(TokenRequestContext request) { return identityClient.authenticateWithExchangeToken(request); }
class WorkloadIdentityCredential implements TokenCredential { private static final ClientLogger LOGGER = new ClientLogger(WorkloadIdentityCredential.class); private final IdentityClient identityClient; private final boolean credentialAvailable; /** * WorkloadIdentityCredential supports Azure workload identity on Kubernetes. * * @param tenantId ID of the application's Azure Active Directory tenant. Also called its directory ID. * @param clientId The client ID of an Azure AD app registration. * @param federatedTokenFilePath The path to a file containing a Kubernetes service account token that authenticates the identity. * @param authorityHost The authority host to authenticate against. * @param identityClientOptions The identity client options to use for authentication. */ WorkloadIdentityCredential(String clientId, String tenantId, String federatedTokenFilePath, String authorityHost, IdentityClientOptions identityClientOptions) { identityClient = new IdentityClientBuilder() .clientAssertionPath(federatedTokenFilePath) .clientId(clientId) .tenantId(tenantId) .identityClientOptions(identityClientOptions.setAuthorityHost(authorityHost)) .build(); ValidationUtil.validateTenantIdCharacterRange(tenantId, LOGGER); credentialAvailable = !(CoreUtils.isNullOrEmpty(clientId) || CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || CoreUtils.isNullOrEmpty(authorityHost)); } @Override }
class WorkloadIdentityCredential implements TokenCredential { private static final ClientLogger LOGGER = new ClientLogger(WorkloadIdentityCredential.class); private final IdentityClient identityClient; /** * WorkloadIdentityCredential supports Azure workload identity on Kubernetes. * * @param tenantId ID of the application's Azure Active Directory tenant. Also called its directory ID. * @param clientId The client ID of an Azure AD app registration. * @param federatedTokenFilePath The path to a file containing a Kubernetes service account token that authenticates the identity. * @param identityClientOptions The identity client options to use for authentication. */ WorkloadIdentityCredential(String tenantId, String clientId, String federatedTokenFilePath, IdentityClientOptions identityClientOptions) { ValidationUtil.validateTenantIdCharacterRange(tenantId, LOGGER); identityClient = new IdentityClientBuilder() .clientAssertionPath(federatedTokenFilePath) .clientId(clientId) .tenantId(tenantId) .identityClientOptions(identityClientOptions) .build(); } @Override String getClientId() { return this.identityClient.getClientId(); } }
nit: any reason this is done eagerly? Couldn't this be deferred to `build` by checking if `clientId` is null or empty.
public WorkloadIdentityCredentialBuilder() { Configuration configuration = Configuration.getGlobalConfiguration().clone(); clientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); }
clientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID);
public WorkloadIdentityCredentialBuilder() { }
class WorkloadIdentityCredentialBuilder extends CredentialBuilderBase<WorkloadIdentityCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(WorkloadIdentityCredentialBuilder.class); private String clientId; /** * Creates an instance of a WorkloadIdentityCredentialBuilder. */ /** * Specifies the client ID of managed identity, when this credential is running * in Azure Kubernetes. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. * * @param clientId the client ID * @return the WorkloadIdentityCredentialBuilder itself */ public WorkloadIdentityCredentialBuilder clientId(String clientId) { this.clientId = clientId; return this; } /** * Creates new {@link WorkloadIdentityCredential} with the configured options set. * * @return a {@link WorkloadIdentityCredential} with the current configurations. */ public WorkloadIdentityCredential build() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE); String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); return new WorkloadIdentityCredential(clientId, tenantId, federatedTokenFilePath, azureAuthorityHost, identityClientOptions.clone()); } }
class WorkloadIdentityCredentialBuilder extends AadCredentialBuilderBase<WorkloadIdentityCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(WorkloadIdentityCredentialBuilder.class); private String tokenFilePath; /** * Creates an instance of a WorkloadIdentityCredentialBuilder. */ /** * Configure the path to a file containing a Kubernetes service account token that authenticates the identity. * The file path is required to authenticate. * * @param tokenFilePath the path to the file containing the token to use for authentication. * @return An updated instance of this builder with the tenant id set as specified. */ public WorkloadIdentityCredentialBuilder tokenFilePath(String tokenFilePath) { this.tokenFilePath = tokenFilePath; return this; } /** * Creates new {@link WorkloadIdentityCredential} with the configured options set. * * @return a {@link WorkloadIdentityCredential} with the current configurations. */ public WorkloadIdentityCredential build() { ValidationUtil.validate(this.getClass().getSimpleName(), LOGGER, "Client ID", clientId, "Tenant ID", tenantId, "Service Token File Path", tokenFilePath); return new WorkloadIdentityCredential(clientId, tenantId, tokenFilePath, identityClientOptions.clone()); } }
nit: Couldn't we clone the global configuration here so we don't need the try/catch to revert modfications to the global configuration. Also, the current way of testing this can run into race conditions if there are exception testing cases where the configuration properties are expected to not exist.
public void testWorkloadIdentityFlow() { Configuration configuration = Configuration.getGlobalConfiguration(); try { String endpoint = "https: String token1 = "token1"; TokenRequestContext request1 = new TokenRequestContext().addScopes("https: OffsetDateTime expiresAt = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1); configuration.put(Configuration.PROPERTY_AZURE_AUTHORITY_HOST, endpoint); configuration.put(Configuration.PROPERTY_AZURE_CLIENT_ID, "dummy-clientId"); configuration.put(ManagedIdentityCredential.AZURE_FEDERATED_TOKEN_FILE, "dummy-file"); configuration.put(Configuration.PROPERTY_AZURE_TENANT_ID, "dummy-tenant"); try (MockedConstruction<IdentityClient> identityClientMock = mockConstruction(IdentityClient.class, (identityClient, context) -> { when(identityClient.authenticateWithExchangeToken(request1)).thenReturn(TestUtils.getMockAccessToken(token1, expiresAt)); })) { WorkloadIdentityCredential credential = new WorkloadIdentityCredentialBuilder().configuration(configuration).clientId(CLIENT_ID).build(); StepVerifier.create(credential.getToken(request1)) .expectNextMatches(token -> token1.equals(token.getToken()) && expiresAt.getSecond() == token.getExpiresAt().getSecond()) .verifyComplete(); Assert.assertNotNull(identityClientMock); } } finally { configuration.remove(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); configuration.remove(Configuration.PROPERTY_AZURE_CLIENT_ID); configuration.remove(ManagedIdentityCredential.AZURE_FEDERATED_TOKEN_FILE); configuration.remove(Configuration.PROPERTY_AZURE_TENANT_ID); } }
Configuration configuration = Configuration.getGlobalConfiguration();
public void testWorkloadIdentityFlow() { String endpoint = "https: String token1 = "token1"; TokenRequestContext request1 = new TokenRequestContext().addScopes("https: OffsetDateTime expiresAt = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1); Configuration configuration = Configuration.getGlobalConfiguration().clone(); configuration.put(Configuration.PROPERTY_AZURE_AUTHORITY_HOST, endpoint); try (MockedConstruction<IdentityClient> identityClientMock = mockConstruction(IdentityClient.class, (identityClient, context) -> { when(identityClient.authenticateWithExchangeToken(request1)).thenReturn(TestUtils.getMockAccessToken(token1, expiresAt)); })) { WorkloadIdentityCredential credential = new WorkloadIdentityCredentialBuilder() .tenantId("dummy-tenantid") .clientId("dummy-clientid") .tokenFilePath("dummy-path") .configuration(configuration).clientId(CLIENT_ID).build(); StepVerifier.create(credential.getToken(request1)) .expectNextMatches(token -> token1.equals(token.getToken()) && expiresAt.getSecond() == token.getExpiresAt().getSecond()) .verifyComplete(); Assert.assertNotNull(identityClientMock); } }
class WorkloadIdentityCredentialTest { private static final String CLIENT_ID = UUID.randomUUID().toString(); @Test @Test public void testWorkloadIdentityFlowFailure() { Configuration configuration = Configuration.getGlobalConfiguration(); try { String endpoint = "https: String token1 = "token1"; TokenRequestContext request1 = new TokenRequestContext().addScopes("https: OffsetDateTime expiresAt = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1); configuration.put(Configuration.PROPERTY_AZURE_AUTHORITY_HOST, endpoint); configuration.put(Configuration.PROPERTY_AZURE_CLIENT_ID, "dummy-clientId"); try (MockedConstruction<IdentityClient> identityClientMock = mockConstruction(IdentityClient.class, (identityClient, context) -> { when(identityClient.authenticateWithExchangeToken(request1)).thenReturn(TestUtils.getMockAccessToken(token1, expiresAt)); })) { WorkloadIdentityCredential credential = new WorkloadIdentityCredentialBuilder().configuration(configuration).clientId(CLIENT_ID).build(); StepVerifier.create(credential.getToken(request1)).expectErrorMatches(t -> t instanceof CredentialUnavailableException && t.getMessage().startsWith("WorkloadIdentityCredential authentication unavailable. ")).verify(); Assert.assertNotNull(identityClientMock); } } finally { configuration.remove(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); configuration.remove(Configuration.PROPERTY_AZURE_CLIENT_ID); } } }
class WorkloadIdentityCredentialTest { private static final String CLIENT_ID = UUID.randomUUID().toString(); @Test @Test(expected = IllegalArgumentException.class) public void testWorkloadIdentityFlowFailureNoTenantId() { Configuration configuration = Configuration.getGlobalConfiguration().clone(); String endpoint = "https: configuration.put(Configuration.PROPERTY_AZURE_AUTHORITY_HOST, endpoint); new WorkloadIdentityCredentialBuilder().configuration(configuration) .clientId(CLIENT_ID).tokenFilePath("dummy-path").build(); } @Test(expected = IllegalArgumentException.class) public void testWorkloadIdentityFlowFailureNoClientId() { Configuration configuration = Configuration.getGlobalConfiguration().clone(); String endpoint = "https: configuration.put(Configuration.PROPERTY_AZURE_AUTHORITY_HOST, endpoint); new WorkloadIdentityCredentialBuilder().configuration(configuration) .tenantId("TENANT_ID").tokenFilePath("dummy-path").build(); } @Test(expected = IllegalArgumentException.class) public void testWorkloadIdentityFlowFailureNoTokenPath() { Configuration configuration = Configuration.getGlobalConfiguration().clone(); String endpoint = "https: configuration.put(Configuration.PROPERTY_AZURE_AUTHORITY_HOST, endpoint); new WorkloadIdentityCredentialBuilder().configuration(configuration) .tenantId("tenant-id").clientId("client-id").build(); } }
yeah, this can be done, added it.
private WorkloadIdentityCredential getWorkloadIdentityCredentialIfAvailable() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE); String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); if (!(CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || CoreUtils.isNullOrEmpty(managedIdentityClientId) || CoreUtils.isNullOrEmpty(azureAuthorityHost))) { return new WorkloadIdentityCredential(managedIdentityClientId, tenantId, federatedTokenFilePath, azureAuthorityHost, identityClientOptions.clone()); } return null; }
|| CoreUtils.isNullOrEmpty(managedIdentityClientId)
private WorkloadIdentityCredential getWorkloadIdentityCredentialIfAvailable() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE); String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); String clientId = CoreUtils.isNullOrEmpty(workloadIdentityClientId) ? managedIdentityClientId : workloadIdentityClientId; if (!(CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || CoreUtils.isNullOrEmpty(clientId) || CoreUtils.isNullOrEmpty(azureAuthorityHost))) { return new WorkloadIdentityCredential(tenantId, clientId, federatedTokenFilePath, identityClientOptions.setAuthorityHost(azureAuthorityHost).clone()); } return null; }
class DefaultAzureCredentialBuilder extends CredentialBuilderBase<DefaultAzureCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(DefaultAzureCredentialBuilder.class); private String tenantId; private String managedIdentityClientId; private String managedIdentityResourceId; private List<String> additionallyAllowedTenants = IdentityUtil .getAdditionalTenantsFromEnvironment(Configuration.getGlobalConfiguration().clone()); /** * Creates an instance of a DefaultAzureCredentialBuilder. */ public DefaultAzureCredentialBuilder() { Configuration configuration = Configuration.getGlobalConfiguration().clone(); tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); managedIdentityClientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); this.identityClientOptions.setIdentityLogOptionsImpl(new IdentityLogOptionsImpl(true)); } /** * Sets the tenant id of the user to authenticate through the {@link DefaultAzureCredential}. If unset, the value * in the AZURE_TENANT_ID environment variable will be used. If neither is set, the default is null * and will authenticate users to their default tenant. * * @param tenantId the tenant ID to set. * @return An updated instance of this builder with the tenant id set as specified. */ public DefaultAzureCredentialBuilder tenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public DefaultAzureCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the KeePass database path to read the cached credentials of Azure toolkit for IntelliJ plugin. * The {@code databasePath} is required on Windows platform. 
For macOS and Linux platform native key chain / * key ring will be accessed respectively to retrieve the cached credentials. * * <p>This path can be located in the IntelliJ IDE. * Windows: File -&gt; Settings -&gt; Appearance &amp; Behavior -&gt; System Settings -&gt; Passwords. </p> * * @param databasePath the path to the KeePass database. * @throws IllegalArgumentException if {@code databasePath} is either not specified or is empty. * @return An updated instance of this builder with the KeePass database path set as specified. */ public DefaultAzureCredentialBuilder intelliJKeePassDatabasePath(String databasePath) { if (CoreUtils.isNullOrEmpty(databasePath)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The KeePass database path is either empty or not configured." + " Please configure it on the builder.")); } this.identityClientOptions.setIntelliJKeePassDatabasePath(databasePath); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityClientId and managedIdentityResourceId can be specified. * * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. 
If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityResourceId and managedIdentityClientId can be specified. * * @param resourceId the resource ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; } /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. */ public DefaultAzureCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(String... 
additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(Arrays.asList(additionallyAllowedTenants)); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(additionallyAllowedTenants); return this; } /** * Creates new {@link DefaultAzureCredential} with the configured options set. * * @return a {@link DefaultAzureCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. */ public DefaultAzureCredential build() { if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityResourceId and managedIdentityClientId can be specified.")); } if (!CoreUtils.isNullOrEmpty(additionallyAllowedTenants)) { identityClientOptions.setAdditionallyAllowedTenants(additionallyAllowedTenants); } return new DefaultAzureCredential(getCredentialsChain()); } private ArrayList<TokenCredential> getCredentialsChain() { WorkloadIdentityCredential workloadIdentityCredential = getWorkloadIdentityCredentialIfAvailable(); ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(workloadIdentityCredential != null ? 
8 : 7); output.add(new EnvironmentCredential(identityClientOptions.clone())); if (workloadIdentityCredential != null) { output.add(workloadIdentityCredential); } output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions.clone())); output.add(new AzureDeveloperCliCredential(tenantId, identityClientOptions.clone())); output.add(new SharedTokenCacheCredential(null, IdentityConstants.DEVELOPER_SINGLE_SIGN_ON_ID, tenantId, identityClientOptions.clone())); output.add(new IntelliJCredential(tenantId, identityClientOptions.clone())); output.add(new AzureCliCredential(tenantId, identityClientOptions.clone())); output.add(new AzurePowerShellCredential(tenantId, identityClientOptions.clone())); return output; } }
class DefaultAzureCredentialBuilder extends CredentialBuilderBase<DefaultAzureCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(DefaultAzureCredentialBuilder.class); private String tenantId; private String managedIdentityClientId; private String workloadIdentityClientId; private String managedIdentityResourceId; private List<String> additionallyAllowedTenants = IdentityUtil .getAdditionalTenantsFromEnvironment(Configuration.getGlobalConfiguration().clone()); /** * Creates an instance of a DefaultAzureCredentialBuilder. */ public DefaultAzureCredentialBuilder() { this.identityClientOptions.setIdentityLogOptionsImpl(new IdentityLogOptionsImpl(true)); } /** * Sets the tenant id of the user to authenticate through the {@link DefaultAzureCredential}. If unset, the value * in the AZURE_TENANT_ID environment variable will be used. If neither is set, the default is null * and will authenticate users to their default tenant. * * @param tenantId the tenant ID to set. * @return An updated instance of this builder with the tenant id set as specified. */ public DefaultAzureCredentialBuilder tenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public DefaultAzureCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the KeePass database path to read the cached credentials of Azure toolkit for IntelliJ plugin. * The {@code databasePath} is required on Windows platform. For macOS and Linux platform native key chain / * key ring will be accessed respectively to retrieve the cached credentials. * * <p>This path can be located in the IntelliJ IDE. 
* Windows: File -&gt; Settings -&gt; Appearance &amp; Behavior -&gt; System Settings -&gt; Passwords. </p> * * @param databasePath the path to the KeePass database. * @throws IllegalArgumentException if {@code databasePath} is either not specified or is empty. * @return An updated instance of this builder with the KeePass database path set as specified. */ public DefaultAzureCredentialBuilder intelliJKeePassDatabasePath(String databasePath) { if (CoreUtils.isNullOrEmpty(databasePath)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The KeePass database path is either empty or not configured." + " Please configure it on the builder.")); } this.identityClientOptions.setIntelliJKeePassDatabasePath(databasePath); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityClientId and managedIdentityResourceId can be specified. * * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the client ID of Azure AD app to be used for AKS workload identity authentication. * if unset, {@link DefaultAzureCredentialBuilder * If both values are unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If none are set, the default value is null and Workload Identity authentication will not be attempted. 
* * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder workloadIdentityClientId(String clientId) { this.workloadIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityResourceId and managedIdentityClientId can be specified. * * @param resourceId the resource ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; } /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. 
*/ public DefaultAzureCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(String... additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(Arrays.asList(additionallyAllowedTenants)); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(additionallyAllowedTenants); return this; } /** * Creates new {@link DefaultAzureCredential} with the configured options set. * * @return a {@link DefaultAzureCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. 
*/ public DefaultAzureCredential build() { loadFallbackValuesFromEnvironment(); if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityResourceId and managedIdentityClientId can be specified.")); } if (!CoreUtils.isNullOrEmpty(additionallyAllowedTenants)) { identityClientOptions.setAdditionallyAllowedTenants(additionallyAllowedTenants); } return new DefaultAzureCredential(getCredentialsChain()); } private void loadFallbackValuesFromEnvironment() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); tenantId = CoreUtils.isNullOrEmpty(tenantId) ? configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID) : tenantId; managedIdentityClientId = CoreUtils.isNullOrEmpty(managedIdentityClientId) ? configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID) : managedIdentityClientId; } private ArrayList<TokenCredential> getCredentialsChain() { WorkloadIdentityCredential workloadIdentityCredential = getWorkloadIdentityCredentialIfAvailable(); ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(workloadIdentityCredential != null ? 
8 : 7); output.add(new EnvironmentCredential(identityClientOptions.clone())); if (workloadIdentityCredential != null) { output.add(workloadIdentityCredential); } output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions.clone())); output.add(new AzureDeveloperCliCredential(tenantId, identityClientOptions.clone())); output.add(new SharedTokenCacheCredential(null, IdentityConstants.DEVELOPER_SINGLE_SIGN_ON_ID, tenantId, identityClientOptions.clone())); output.add(new IntelliJCredential(tenantId, identityClientOptions.clone())); output.add(new AzureCliCredential(tenantId, identityClientOptions.clone())); output.add(new AzurePowerShellCredential(tenantId, identityClientOptions.clone())); return output; } }
it isn't possible for other credentials to be determined at construction level, as they have to execute first to reach that conclusion. This one is a niche auth scenario, only applicable to Kubernetes.
public Mono<AccessToken> getToken(TokenRequestContext request) { if (credentialAvailable) { return identityClient.authenticateWithExchangeToken(request); } else { return Mono.error(LOGGER.logExceptionAsError(new CredentialUnavailableException("WorkloadIdentityCredential authentication unavailable." + " Environment variables are not fully configured."))); } }
if (credentialAvailable) {
public Mono<AccessToken> getToken(TokenRequestContext request) { return identityClient.authenticateWithExchangeToken(request); }
class WorkloadIdentityCredential implements TokenCredential { private static final ClientLogger LOGGER = new ClientLogger(WorkloadIdentityCredential.class); private final IdentityClient identityClient; private final boolean credentialAvailable; /** * WorkloadIdentityCredential supports Azure workload identity on Kubernetes. * * @param tenantId ID of the application's Azure Active Directory tenant. Also called its directory ID. * @param clientId The client ID of an Azure AD app registration. * @param federatedTokenFilePath The path to a file containing a Kubernetes service account token that authenticates the identity. * @param authorityHost The authority host to authenticate against. * @param identityClientOptions The identity client options to use for authentication. */ WorkloadIdentityCredential(String clientId, String tenantId, String federatedTokenFilePath, String authorityHost, IdentityClientOptions identityClientOptions) { identityClient = new IdentityClientBuilder() .clientAssertionPath(federatedTokenFilePath) .clientId(clientId) .tenantId(tenantId) .identityClientOptions(identityClientOptions.setAuthorityHost(authorityHost)) .build(); ValidationUtil.validateTenantIdCharacterRange(tenantId, LOGGER); credentialAvailable = !(CoreUtils.isNullOrEmpty(clientId) || CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || CoreUtils.isNullOrEmpty(authorityHost)); } @Override }
class WorkloadIdentityCredential implements TokenCredential { private static final ClientLogger LOGGER = new ClientLogger(WorkloadIdentityCredential.class); private final IdentityClient identityClient; /** * WorkloadIdentityCredential supports Azure workload identity on Kubernetes. * * @param tenantId ID of the application's Azure Active Directory tenant. Also called its directory ID. * @param clientId The client ID of an Azure AD app registration. * @param federatedTokenFilePath The path to a file containing a Kubernetes service account token that authenticates the identity. * @param identityClientOptions The identity client options to use for authentication. */ WorkloadIdentityCredential(String tenantId, String clientId, String federatedTokenFilePath, IdentityClientOptions identityClientOptions) { ValidationUtil.validateTenantIdCharacterRange(tenantId, LOGGER); identityClient = new IdentityClientBuilder() .clientAssertionPath(federatedTokenFilePath) .clientId(clientId) .tenantId(tenantId) .identityClientOptions(identityClientOptions) .build(); } @Override String getClientId() { return this.identityClient.getClientId(); } }
updated.
public Mono<AccessToken> getToken(TokenRequestContext request) { if (credentialAvailable) { return identityClient.authenticateWithExchangeToken(request); } else { return Mono.error(LOGGER.logExceptionAsError(new CredentialUnavailableException("WorkloadIdentityCredential authentication unavailable." + " Environment variables are not fully configured."))); } }
return Mono.error(LOGGER.logExceptionAsError(new CredentialUnavailableException("WorkloadIdentityCredential authentication unavailable."
public Mono<AccessToken> getToken(TokenRequestContext request) { return identityClient.authenticateWithExchangeToken(request); }
class WorkloadIdentityCredential implements TokenCredential { private static final ClientLogger LOGGER = new ClientLogger(WorkloadIdentityCredential.class); private final IdentityClient identityClient; private final boolean credentialAvailable; /** * WorkloadIdentityCredential supports Azure workload identity on Kubernetes. * * @param tenantId ID of the application's Azure Active Directory tenant. Also called its directory ID. * @param clientId The client ID of an Azure AD app registration. * @param federatedTokenFilePath The path to a file containing a Kubernetes service account token that authenticates the identity. * @param authorityHost The authority host to authenticate against. * @param identityClientOptions The identity client options to use for authentication. */ WorkloadIdentityCredential(String clientId, String tenantId, String federatedTokenFilePath, String authorityHost, IdentityClientOptions identityClientOptions) { identityClient = new IdentityClientBuilder() .clientAssertionPath(federatedTokenFilePath) .clientId(clientId) .tenantId(tenantId) .identityClientOptions(identityClientOptions.setAuthorityHost(authorityHost)) .build(); ValidationUtil.validateTenantIdCharacterRange(tenantId, LOGGER); credentialAvailable = !(CoreUtils.isNullOrEmpty(clientId) || CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || CoreUtils.isNullOrEmpty(authorityHost)); } @Override }
class WorkloadIdentityCredential implements TokenCredential { private static final ClientLogger LOGGER = new ClientLogger(WorkloadIdentityCredential.class); private final IdentityClient identityClient; /** * WorkloadIdentityCredential supports Azure workload identity on Kubernetes. * * @param tenantId ID of the application's Azure Active Directory tenant. Also called its directory ID. * @param clientId The client ID of an Azure AD app registration. * @param federatedTokenFilePath The path to a file containing a Kubernetes service account token that authenticates the identity. * @param identityClientOptions The identity client options to use for authentication. */ WorkloadIdentityCredential(String tenantId, String clientId, String federatedTokenFilePath, IdentityClientOptions identityClientOptions) { ValidationUtil.validateTenantIdCharacterRange(tenantId, LOGGER); identityClient = new IdentityClientBuilder() .clientAssertionPath(federatedTokenFilePath) .clientId(clientId) .tenantId(tenantId) .identityClientOptions(identityClientOptions) .build(); } @Override String getClientId() { return this.identityClient.getClientId(); } }
done.
public void testWorkloadIdentityFlow() { Configuration configuration = Configuration.getGlobalConfiguration(); try { String endpoint = "https: String token1 = "token1"; TokenRequestContext request1 = new TokenRequestContext().addScopes("https: OffsetDateTime expiresAt = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1); configuration.put(Configuration.PROPERTY_AZURE_AUTHORITY_HOST, endpoint); configuration.put(Configuration.PROPERTY_AZURE_CLIENT_ID, "dummy-clientId"); configuration.put(ManagedIdentityCredential.AZURE_FEDERATED_TOKEN_FILE, "dummy-file"); configuration.put(Configuration.PROPERTY_AZURE_TENANT_ID, "dummy-tenant"); try (MockedConstruction<IdentityClient> identityClientMock = mockConstruction(IdentityClient.class, (identityClient, context) -> { when(identityClient.authenticateWithExchangeToken(request1)).thenReturn(TestUtils.getMockAccessToken(token1, expiresAt)); })) { WorkloadIdentityCredential credential = new WorkloadIdentityCredentialBuilder().configuration(configuration).clientId(CLIENT_ID).build(); StepVerifier.create(credential.getToken(request1)) .expectNextMatches(token -> token1.equals(token.getToken()) && expiresAt.getSecond() == token.getExpiresAt().getSecond()) .verifyComplete(); Assert.assertNotNull(identityClientMock); } } finally { configuration.remove(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); configuration.remove(Configuration.PROPERTY_AZURE_CLIENT_ID); configuration.remove(ManagedIdentityCredential.AZURE_FEDERATED_TOKEN_FILE); configuration.remove(Configuration.PROPERTY_AZURE_TENANT_ID); } }
Configuration configuration = Configuration.getGlobalConfiguration();
public void testWorkloadIdentityFlow() { String endpoint = "https: String token1 = "token1"; TokenRequestContext request1 = new TokenRequestContext().addScopes("https: OffsetDateTime expiresAt = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1); Configuration configuration = Configuration.getGlobalConfiguration().clone(); configuration.put(Configuration.PROPERTY_AZURE_AUTHORITY_HOST, endpoint); try (MockedConstruction<IdentityClient> identityClientMock = mockConstruction(IdentityClient.class, (identityClient, context) -> { when(identityClient.authenticateWithExchangeToken(request1)).thenReturn(TestUtils.getMockAccessToken(token1, expiresAt)); })) { WorkloadIdentityCredential credential = new WorkloadIdentityCredentialBuilder() .tenantId("dummy-tenantid") .clientId("dummy-clientid") .tokenFilePath("dummy-path") .configuration(configuration).clientId(CLIENT_ID).build(); StepVerifier.create(credential.getToken(request1)) .expectNextMatches(token -> token1.equals(token.getToken()) && expiresAt.getSecond() == token.getExpiresAt().getSecond()) .verifyComplete(); Assert.assertNotNull(identityClientMock); } }
class WorkloadIdentityCredentialTest { private static final String CLIENT_ID = UUID.randomUUID().toString(); @Test @Test public void testWorkloadIdentityFlowFailure() { Configuration configuration = Configuration.getGlobalConfiguration(); try { String endpoint = "https: String token1 = "token1"; TokenRequestContext request1 = new TokenRequestContext().addScopes("https: OffsetDateTime expiresAt = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1); configuration.put(Configuration.PROPERTY_AZURE_AUTHORITY_HOST, endpoint); configuration.put(Configuration.PROPERTY_AZURE_CLIENT_ID, "dummy-clientId"); try (MockedConstruction<IdentityClient> identityClientMock = mockConstruction(IdentityClient.class, (identityClient, context) -> { when(identityClient.authenticateWithExchangeToken(request1)).thenReturn(TestUtils.getMockAccessToken(token1, expiresAt)); })) { WorkloadIdentityCredential credential = new WorkloadIdentityCredentialBuilder().configuration(configuration).clientId(CLIENT_ID).build(); StepVerifier.create(credential.getToken(request1)).expectErrorMatches(t -> t instanceof CredentialUnavailableException && t.getMessage().startsWith("WorkloadIdentityCredential authentication unavailable. ")).verify(); Assert.assertNotNull(identityClientMock); } } finally { configuration.remove(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); configuration.remove(Configuration.PROPERTY_AZURE_CLIENT_ID); } } }
class WorkloadIdentityCredentialTest { private static final String CLIENT_ID = UUID.randomUUID().toString(); @Test @Test(expected = IllegalArgumentException.class) public void testWorkloadIdentityFlowFailureNoTenantId() { Configuration configuration = Configuration.getGlobalConfiguration().clone(); String endpoint = "https: configuration.put(Configuration.PROPERTY_AZURE_AUTHORITY_HOST, endpoint); new WorkloadIdentityCredentialBuilder().configuration(configuration) .clientId(CLIENT_ID).tokenFilePath("dummy-path").build(); } @Test(expected = IllegalArgumentException.class) public void testWorkloadIdentityFlowFailureNoClientId() { Configuration configuration = Configuration.getGlobalConfiguration().clone(); String endpoint = "https: configuration.put(Configuration.PROPERTY_AZURE_AUTHORITY_HOST, endpoint); new WorkloadIdentityCredentialBuilder().configuration(configuration) .tenantId("TENANT_ID").tokenFilePath("dummy-path").build(); } @Test(expected = IllegalArgumentException.class) public void testWorkloadIdentityFlowFailureNoTokenPath() { Configuration configuration = Configuration.getGlobalConfiguration().clone(); String endpoint = "https: configuration.put(Configuration.PROPERTY_AZURE_AUTHORITY_HOST, endpoint); new WorkloadIdentityCredentialBuilder().configuration(configuration) .tenantId("tenant-id").clientId("client-id").build(); } }
This logic is removed as part of new changes.
public WorkloadIdentityCredentialBuilder() { Configuration configuration = Configuration.getGlobalConfiguration().clone(); clientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); }
clientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID);
public WorkloadIdentityCredentialBuilder() { }
class WorkloadIdentityCredentialBuilder extends CredentialBuilderBase<WorkloadIdentityCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(WorkloadIdentityCredentialBuilder.class); private String clientId; /** * Creates an instance of a WorkloadIdentityCredentialBuilder. */ /** * Specifies the client ID of managed identity, when this credential is running * in Azure Kubernetes. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. * * @param clientId the client ID * @return the WorkloadIdentityCredentialBuilder itself */ public WorkloadIdentityCredentialBuilder clientId(String clientId) { this.clientId = clientId; return this; } /** * Creates new {@link WorkloadIdentityCredential} with the configured options set. * * @return a {@link WorkloadIdentityCredential} with the current configurations. */ public WorkloadIdentityCredential build() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE); String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); return new WorkloadIdentityCredential(clientId, tenantId, federatedTokenFilePath, azureAuthorityHost, identityClientOptions.clone()); } }
class WorkloadIdentityCredentialBuilder extends AadCredentialBuilderBase<WorkloadIdentityCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(WorkloadIdentityCredentialBuilder.class); private String tokenFilePath; /** * Creates an instance of a WorkloadIdentityCredentialBuilder. */ /** * Configure the path to a file containing a Kubernetes service account token that authenticates the identity. * The file path is required to authenticate. * * @param tokenFilePath the path to the file containing the token to use for authentication. * @return An updated instance of this builder with the tenant id set as specified. */ public WorkloadIdentityCredentialBuilder tokenFilePath(String tokenFilePath) { this.tokenFilePath = tokenFilePath; return this; } /** * Creates new {@link WorkloadIdentityCredential} with the configured options set. * * @return a {@link WorkloadIdentityCredential} with the current configurations. */ public WorkloadIdentityCredential build() { ValidationUtil.validate(this.getClass().getSimpleName(), LOGGER, "Client ID", clientId, "Tenant ID", tenantId, "Service Token File Path", tokenFilePath); return new WorkloadIdentityCredential(clientId, tenantId, tokenFilePath, identityClientOptions.clone()); } }
If `managedIdentityClientId` is a public option, we should have another specifically for workload identity e.g. `workloadIdentityClientId` because we want to distinguish workload identity from managed identity. The precedence would then be: 1. workloadIdentityClientId 2. managedIdentityClientId 3. AZURE_CLIENT_ID
private ArrayList<TokenCredential> getCredentialsChain() { WorkloadIdentityCredential workloadIdentityCredential = CoreUtils.isNullOrEmpty(managedIdentityClientId) ? null : getWorkloadIdentityCredentialIfAvailable(); ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(workloadIdentityCredential != null ? 8 : 7); output.add(new EnvironmentCredential(identityClientOptions.clone())); if (workloadIdentityCredential != null) { output.add(workloadIdentityCredential); } output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions.clone())); output.add(new AzureDeveloperCliCredential(tenantId, identityClientOptions.clone())); output.add(new SharedTokenCacheCredential(null, IdentityConstants.DEVELOPER_SINGLE_SIGN_ON_ID, tenantId, identityClientOptions.clone())); output.add(new IntelliJCredential(tenantId, identityClientOptions.clone())); output.add(new AzureCliCredential(tenantId, identityClientOptions.clone())); output.add(new AzurePowerShellCredential(tenantId, identityClientOptions.clone())); return output; }
WorkloadIdentityCredential workloadIdentityCredential = CoreUtils.isNullOrEmpty(managedIdentityClientId) ? null : getWorkloadIdentityCredentialIfAvailable();
private ArrayList<TokenCredential> getCredentialsChain() { WorkloadIdentityCredential workloadIdentityCredential = getWorkloadIdentityCredentialIfAvailable(); ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(workloadIdentityCredential != null ? 8 : 7); output.add(new EnvironmentCredential(identityClientOptions.clone())); if (workloadIdentityCredential != null) { output.add(workloadIdentityCredential); } output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions.clone())); output.add(new AzureDeveloperCliCredential(tenantId, identityClientOptions.clone())); output.add(new SharedTokenCacheCredential(null, IdentityConstants.DEVELOPER_SINGLE_SIGN_ON_ID, tenantId, identityClientOptions.clone())); output.add(new IntelliJCredential(tenantId, identityClientOptions.clone())); output.add(new AzureCliCredential(tenantId, identityClientOptions.clone())); output.add(new AzurePowerShellCredential(tenantId, identityClientOptions.clone())); return output; }
class DefaultAzureCredentialBuilder extends CredentialBuilderBase<DefaultAzureCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(DefaultAzureCredentialBuilder.class); private String tenantId; private String managedIdentityClientId; private String managedIdentityResourceId; private List<String> additionallyAllowedTenants = IdentityUtil .getAdditionalTenantsFromEnvironment(Configuration.getGlobalConfiguration().clone()); /** * Creates an instance of a DefaultAzureCredentialBuilder. */ public DefaultAzureCredentialBuilder() { Configuration configuration = Configuration.getGlobalConfiguration().clone(); tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); managedIdentityClientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); this.identityClientOptions.setIdentityLogOptionsImpl(new IdentityLogOptionsImpl(true)); } /** * Sets the tenant id of the user to authenticate through the {@link DefaultAzureCredential}. If unset, the value * in the AZURE_TENANT_ID environment variable will be used. If neither is set, the default is null * and will authenticate users to their default tenant. * * @param tenantId the tenant ID to set. * @return An updated instance of this builder with the tenant id set as specified. */ public DefaultAzureCredentialBuilder tenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public DefaultAzureCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the KeePass database path to read the cached credentials of Azure toolkit for IntelliJ plugin. * The {@code databasePath} is required on Windows platform. 
For macOS and Linux platform native key chain / * key ring will be accessed respectively to retrieve the cached credentials. * * <p>This path can be located in the IntelliJ IDE. * Windows: File -&gt; Settings -&gt; Appearance &amp; Behavior -&gt; System Settings -&gt; Passwords. </p> * * @param databasePath the path to the KeePass database. * @throws IllegalArgumentException if {@code databasePath} is either not specified or is empty. * @return An updated instance of this builder with the KeePass database path set as specified. */ public DefaultAzureCredentialBuilder intelliJKeePassDatabasePath(String databasePath) { if (CoreUtils.isNullOrEmpty(databasePath)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The KeePass database path is either empty or not configured." + " Please configure it on the builder.")); } this.identityClientOptions.setIntelliJKeePassDatabasePath(databasePath); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityClientId and managedIdentityResourceId can be specified. * * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. 
If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityResourceId and managedIdentityClientId can be specified. * * @param resourceId the resource ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; } /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. */ public DefaultAzureCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(String... 
additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(Arrays.asList(additionallyAllowedTenants)); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(additionallyAllowedTenants); return this; } /** * Creates new {@link DefaultAzureCredential} with the configured options set. * * @return a {@link DefaultAzureCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. */ public DefaultAzureCredential build() { if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityResourceId and managedIdentityClientId can be specified.")); } if (!CoreUtils.isNullOrEmpty(additionallyAllowedTenants)) { identityClientOptions.setAdditionallyAllowedTenants(additionallyAllowedTenants); } return new DefaultAzureCredential(getCredentialsChain()); } private WorkloadIdentityCredential getWorkloadIdentityCredentialIfAvailable() { Configuration configuration = identityClientOptions.getConfiguration() == null ? 
Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE); String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); if (!(CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || CoreUtils.isNullOrEmpty(managedIdentityClientId) || CoreUtils.isNullOrEmpty(azureAuthorityHost))) { return new WorkloadIdentityCredential(managedIdentityClientId, tenantId, federatedTokenFilePath, identityClientOptions.setAuthorityHost(azureAuthorityHost).clone()); } return null; } }
class DefaultAzureCredentialBuilder extends CredentialBuilderBase<DefaultAzureCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(DefaultAzureCredentialBuilder.class); private String tenantId; private String managedIdentityClientId; private String workloadIdentityClientId; private String managedIdentityResourceId; private List<String> additionallyAllowedTenants = IdentityUtil .getAdditionalTenantsFromEnvironment(Configuration.getGlobalConfiguration().clone()); /** * Creates an instance of a DefaultAzureCredentialBuilder. */ public DefaultAzureCredentialBuilder() { this.identityClientOptions.setIdentityLogOptionsImpl(new IdentityLogOptionsImpl(true)); } /** * Sets the tenant id of the user to authenticate through the {@link DefaultAzureCredential}. If unset, the value * in the AZURE_TENANT_ID environment variable will be used. If neither is set, the default is null * and will authenticate users to their default tenant. * * @param tenantId the tenant ID to set. * @return An updated instance of this builder with the tenant id set as specified. */ public DefaultAzureCredentialBuilder tenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public DefaultAzureCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the KeePass database path to read the cached credentials of Azure toolkit for IntelliJ plugin. * The {@code databasePath} is required on Windows platform. For macOS and Linux platform native key chain / * key ring will be accessed respectively to retrieve the cached credentials. * * <p>This path can be located in the IntelliJ IDE. 
* Windows: File -&gt; Settings -&gt; Appearance &amp; Behavior -&gt; System Settings -&gt; Passwords. </p> * * @param databasePath the path to the KeePass database. * @throws IllegalArgumentException if {@code databasePath} is either not specified or is empty. * @return An updated instance of this builder with the KeePass database path set as specified. */ public DefaultAzureCredentialBuilder intelliJKeePassDatabasePath(String databasePath) { if (CoreUtils.isNullOrEmpty(databasePath)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The KeePass database path is either empty or not configured." + " Please configure it on the builder.")); } this.identityClientOptions.setIntelliJKeePassDatabasePath(databasePath); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityClientId and managedIdentityResourceId can be specified. * * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the client ID of Azure AD app to be used for AKS workload identity authentication. * if unset, {@link DefaultAzureCredentialBuilder * If both values are unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If none are set, the default value is null and Workload Identity authentication will not be attempted. 
* * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder workloadIdentityClientId(String clientId) { this.workloadIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityResourceId and managedIdentityClientId can be specified. * * @param resourceId the resource ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; } /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. 
*/ public DefaultAzureCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(String... additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(Arrays.asList(additionallyAllowedTenants)); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(additionallyAllowedTenants); return this; } /** * Creates new {@link DefaultAzureCredential} with the configured options set. * * @return a {@link DefaultAzureCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. 
*/ public DefaultAzureCredential build() { loadFallbackValuesFromEnvironment(); if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityResourceId and managedIdentityClientId can be specified.")); } if (!CoreUtils.isNullOrEmpty(additionallyAllowedTenants)) { identityClientOptions.setAdditionallyAllowedTenants(additionallyAllowedTenants); } return new DefaultAzureCredential(getCredentialsChain()); } private void loadFallbackValuesFromEnvironment() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); tenantId = CoreUtils.isNullOrEmpty(tenantId) ? configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID) : tenantId; managedIdentityClientId = CoreUtils.isNullOrEmpty(managedIdentityClientId) ? configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID) : managedIdentityClientId; } private WorkloadIdentityCredential getWorkloadIdentityCredentialIfAvailable() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE); String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); String clientId = CoreUtils.isNullOrEmpty(workloadIdentityClientId) ? managedIdentityClientId : workloadIdentityClientId; if (!(CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || CoreUtils.isNullOrEmpty(clientId) || CoreUtils.isNullOrEmpty(azureAuthorityHost))) { return new WorkloadIdentityCredential(tenantId, clientId, federatedTokenFilePath, identityClientOptions.setAuthorityHost(azureAuthorityHost).clone()); } return null; } }
added the workloadIdentityClientId option.
private ArrayList<TokenCredential> getCredentialsChain() { WorkloadIdentityCredential workloadIdentityCredential = CoreUtils.isNullOrEmpty(managedIdentityClientId) ? null : getWorkloadIdentityCredentialIfAvailable(); ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(workloadIdentityCredential != null ? 8 : 7); output.add(new EnvironmentCredential(identityClientOptions.clone())); if (workloadIdentityCredential != null) { output.add(workloadIdentityCredential); } output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions.clone())); output.add(new AzureDeveloperCliCredential(tenantId, identityClientOptions.clone())); output.add(new SharedTokenCacheCredential(null, IdentityConstants.DEVELOPER_SINGLE_SIGN_ON_ID, tenantId, identityClientOptions.clone())); output.add(new IntelliJCredential(tenantId, identityClientOptions.clone())); output.add(new AzureCliCredential(tenantId, identityClientOptions.clone())); output.add(new AzurePowerShellCredential(tenantId, identityClientOptions.clone())); return output; }
WorkloadIdentityCredential workloadIdentityCredential = CoreUtils.isNullOrEmpty(managedIdentityClientId) ? null : getWorkloadIdentityCredentialIfAvailable();
private ArrayList<TokenCredential> getCredentialsChain() { WorkloadIdentityCredential workloadIdentityCredential = getWorkloadIdentityCredentialIfAvailable(); ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(workloadIdentityCredential != null ? 8 : 7); output.add(new EnvironmentCredential(identityClientOptions.clone())); if (workloadIdentityCredential != null) { output.add(workloadIdentityCredential); } output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions.clone())); output.add(new AzureDeveloperCliCredential(tenantId, identityClientOptions.clone())); output.add(new SharedTokenCacheCredential(null, IdentityConstants.DEVELOPER_SINGLE_SIGN_ON_ID, tenantId, identityClientOptions.clone())); output.add(new IntelliJCredential(tenantId, identityClientOptions.clone())); output.add(new AzureCliCredential(tenantId, identityClientOptions.clone())); output.add(new AzurePowerShellCredential(tenantId, identityClientOptions.clone())); return output; }
class DefaultAzureCredentialBuilder extends CredentialBuilderBase<DefaultAzureCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(DefaultAzureCredentialBuilder.class); private String tenantId; private String managedIdentityClientId; private String managedIdentityResourceId; private List<String> additionallyAllowedTenants = IdentityUtil .getAdditionalTenantsFromEnvironment(Configuration.getGlobalConfiguration().clone()); /** * Creates an instance of a DefaultAzureCredentialBuilder. */ public DefaultAzureCredentialBuilder() { Configuration configuration = Configuration.getGlobalConfiguration().clone(); tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); managedIdentityClientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); this.identityClientOptions.setIdentityLogOptionsImpl(new IdentityLogOptionsImpl(true)); } /** * Sets the tenant id of the user to authenticate through the {@link DefaultAzureCredential}. If unset, the value * in the AZURE_TENANT_ID environment variable will be used. If neither is set, the default is null * and will authenticate users to their default tenant. * * @param tenantId the tenant ID to set. * @return An updated instance of this builder with the tenant id set as specified. */ public DefaultAzureCredentialBuilder tenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public DefaultAzureCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the KeePass database path to read the cached credentials of Azure toolkit for IntelliJ plugin. * The {@code databasePath} is required on Windows platform. 
For macOS and Linux platform native key chain / * key ring will be accessed respectively to retrieve the cached credentials. * * <p>This path can be located in the IntelliJ IDE. * Windows: File -&gt; Settings -&gt; Appearance &amp; Behavior -&gt; System Settings -&gt; Passwords. </p> * * @param databasePath the path to the KeePass database. * @throws IllegalArgumentException if {@code databasePath} is either not specified or is empty. * @return An updated instance of this builder with the KeePass database path set as specified. */ public DefaultAzureCredentialBuilder intelliJKeePassDatabasePath(String databasePath) { if (CoreUtils.isNullOrEmpty(databasePath)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The KeePass database path is either empty or not configured." + " Please configure it on the builder.")); } this.identityClientOptions.setIntelliJKeePassDatabasePath(databasePath); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityClientId and managedIdentityResourceId can be specified. * * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. 
If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityResourceId and managedIdentityClientId can be specified. * * @param resourceId the resource ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; } /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. */ public DefaultAzureCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(String... 
additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(Arrays.asList(additionallyAllowedTenants)); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(additionallyAllowedTenants); return this; } /** * Creates new {@link DefaultAzureCredential} with the configured options set. * * @return a {@link DefaultAzureCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. */ public DefaultAzureCredential build() { if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityResourceId and managedIdentityClientId can be specified.")); } if (!CoreUtils.isNullOrEmpty(additionallyAllowedTenants)) { identityClientOptions.setAdditionallyAllowedTenants(additionallyAllowedTenants); } return new DefaultAzureCredential(getCredentialsChain()); } private WorkloadIdentityCredential getWorkloadIdentityCredentialIfAvailable() { Configuration configuration = identityClientOptions.getConfiguration() == null ? 
Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE); String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); if (!(CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || CoreUtils.isNullOrEmpty(managedIdentityClientId) || CoreUtils.isNullOrEmpty(azureAuthorityHost))) { return new WorkloadIdentityCredential(managedIdentityClientId, tenantId, federatedTokenFilePath, identityClientOptions.setAuthorityHost(azureAuthorityHost).clone()); } return null; } }
class DefaultAzureCredentialBuilder extends CredentialBuilderBase<DefaultAzureCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(DefaultAzureCredentialBuilder.class); private String tenantId; private String managedIdentityClientId; private String workloadIdentityClientId; private String managedIdentityResourceId; private List<String> additionallyAllowedTenants = IdentityUtil .getAdditionalTenantsFromEnvironment(Configuration.getGlobalConfiguration().clone()); /** * Creates an instance of a DefaultAzureCredentialBuilder. */ public DefaultAzureCredentialBuilder() { this.identityClientOptions.setIdentityLogOptionsImpl(new IdentityLogOptionsImpl(true)); } /** * Sets the tenant id of the user to authenticate through the {@link DefaultAzureCredential}. If unset, the value * in the AZURE_TENANT_ID environment variable will be used. If neither is set, the default is null * and will authenticate users to their default tenant. * * @param tenantId the tenant ID to set. * @return An updated instance of this builder with the tenant id set as specified. */ public DefaultAzureCredentialBuilder tenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public DefaultAzureCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the KeePass database path to read the cached credentials of Azure toolkit for IntelliJ plugin. * The {@code databasePath} is required on Windows platform. For macOS and Linux platform native key chain / * key ring will be accessed respectively to retrieve the cached credentials. * * <p>This path can be located in the IntelliJ IDE. 
* Windows: File -&gt; Settings -&gt; Appearance &amp; Behavior -&gt; System Settings -&gt; Passwords. </p> * * @param databasePath the path to the KeePass database. * @throws IllegalArgumentException if {@code databasePath} is either not specified or is empty. * @return An updated instance of this builder with the KeePass database path set as specified. */ public DefaultAzureCredentialBuilder intelliJKeePassDatabasePath(String databasePath) { if (CoreUtils.isNullOrEmpty(databasePath)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The KeePass database path is either empty or not configured." + " Please configure it on the builder.")); } this.identityClientOptions.setIntelliJKeePassDatabasePath(databasePath); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityClientId and managedIdentityResourceId can be specified. * * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the client ID of Azure AD app to be used for AKS workload identity authentication. * if unset, {@link DefaultAzureCredentialBuilder * If both values are unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If none are set, the default value is null and Workload Identity authentication will not be attempted. 
* * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder workloadIdentityClientId(String clientId) { this.workloadIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityResourceId and managedIdentityClientId can be specified. * * @param resourceId the resource ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; } /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. 
*/ public DefaultAzureCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(String... additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(Arrays.asList(additionallyAllowedTenants)); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(additionallyAllowedTenants); return this; } /** * Creates new {@link DefaultAzureCredential} with the configured options set. * * @return a {@link DefaultAzureCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. 
*/ public DefaultAzureCredential build() { loadFallbackValuesFromEnvironment(); if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityResourceId and managedIdentityClientId can be specified.")); } if (!CoreUtils.isNullOrEmpty(additionallyAllowedTenants)) { identityClientOptions.setAdditionallyAllowedTenants(additionallyAllowedTenants); } return new DefaultAzureCredential(getCredentialsChain()); } private void loadFallbackValuesFromEnvironment() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); tenantId = CoreUtils.isNullOrEmpty(tenantId) ? configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID) : tenantId; managedIdentityClientId = CoreUtils.isNullOrEmpty(managedIdentityClientId) ? configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID) : managedIdentityClientId; } private WorkloadIdentityCredential getWorkloadIdentityCredentialIfAvailable() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE); String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); String clientId = CoreUtils.isNullOrEmpty(workloadIdentityClientId) ? managedIdentityClientId : workloadIdentityClientId; if (!(CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || CoreUtils.isNullOrEmpty(clientId) || CoreUtils.isNullOrEmpty(azureAuthorityHost))) { return new WorkloadIdentityCredential(tenantId, clientId, federatedTokenFilePath, identityClientOptions.setAuthorityHost(azureAuthorityHost).clone()); } return null; } }
Looks like we should unconditionally call the helper. When neither ClientID option is set AZURE_CLIENT_ID could still have a value; if it does, we should use that value.
private ArrayList<TokenCredential> getCredentialsChain() { WorkloadIdentityCredential workloadIdentityCredential = (CoreUtils.isNullOrEmpty(managedIdentityClientId) && CoreUtils.isNullOrEmpty(workloadIdentityClientId)) ? null : getWorkloadIdentityCredentialIfAvailable(); ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(workloadIdentityCredential != null ? 8 : 7); output.add(new EnvironmentCredential(identityClientOptions.clone())); if (workloadIdentityCredential != null) { output.add(workloadIdentityCredential); } output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions.clone())); output.add(new AzureDeveloperCliCredential(tenantId, identityClientOptions.clone())); output.add(new SharedTokenCacheCredential(null, IdentityConstants.DEVELOPER_SINGLE_SIGN_ON_ID, tenantId, identityClientOptions.clone())); output.add(new IntelliJCredential(tenantId, identityClientOptions.clone())); output.add(new AzureCliCredential(tenantId, identityClientOptions.clone())); output.add(new AzurePowerShellCredential(tenantId, identityClientOptions.clone())); return output; }
WorkloadIdentityCredential workloadIdentityCredential = (CoreUtils.isNullOrEmpty(managedIdentityClientId) && CoreUtils.isNullOrEmpty(workloadIdentityClientId)) ? null : getWorkloadIdentityCredentialIfAvailable();
private ArrayList<TokenCredential> getCredentialsChain() { WorkloadIdentityCredential workloadIdentityCredential = getWorkloadIdentityCredentialIfAvailable(); ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(workloadIdentityCredential != null ? 8 : 7); output.add(new EnvironmentCredential(identityClientOptions.clone())); if (workloadIdentityCredential != null) { output.add(workloadIdentityCredential); } output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions.clone())); output.add(new AzureDeveloperCliCredential(tenantId, identityClientOptions.clone())); output.add(new SharedTokenCacheCredential(null, IdentityConstants.DEVELOPER_SINGLE_SIGN_ON_ID, tenantId, identityClientOptions.clone())); output.add(new IntelliJCredential(tenantId, identityClientOptions.clone())); output.add(new AzureCliCredential(tenantId, identityClientOptions.clone())); output.add(new AzurePowerShellCredential(tenantId, identityClientOptions.clone())); return output; }
class DefaultAzureCredentialBuilder extends CredentialBuilderBase<DefaultAzureCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(DefaultAzureCredentialBuilder.class); private String tenantId; private String managedIdentityClientId; private String workloadIdentityClientId; private String managedIdentityResourceId; private List<String> additionallyAllowedTenants = IdentityUtil .getAdditionalTenantsFromEnvironment(Configuration.getGlobalConfiguration().clone()); /** * Creates an instance of a DefaultAzureCredentialBuilder. */ public DefaultAzureCredentialBuilder() { Configuration configuration = Configuration.getGlobalConfiguration().clone(); tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); managedIdentityClientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); this.identityClientOptions.setIdentityLogOptionsImpl(new IdentityLogOptionsImpl(true)); } /** * Sets the tenant id of the user to authenticate through the {@link DefaultAzureCredential}. If unset, the value * in the AZURE_TENANT_ID environment variable will be used. If neither is set, the default is null * and will authenticate users to their default tenant. * * @param tenantId the tenant ID to set. * @return An updated instance of this builder with the tenant id set as specified. */ public DefaultAzureCredentialBuilder tenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public DefaultAzureCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the KeePass database path to read the cached credentials of Azure toolkit for IntelliJ plugin. * The {@code databasePath} is required on Windows platform. 
For macOS and Linux platform native key chain / * key ring will be accessed respectively to retrieve the cached credentials. * * <p>This path can be located in the IntelliJ IDE. * Windows: File -&gt; Settings -&gt; Appearance &amp; Behavior -&gt; System Settings -&gt; Passwords. </p> * * @param databasePath the path to the KeePass database. * @throws IllegalArgumentException if {@code databasePath} is either not specified or is empty. * @return An updated instance of this builder with the KeePass database path set as specified. */ public DefaultAzureCredentialBuilder intelliJKeePassDatabasePath(String databasePath) { if (CoreUtils.isNullOrEmpty(databasePath)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The KeePass database path is either empty or not configured." + " Please configure it on the builder.")); } this.identityClientOptions.setIntelliJKeePassDatabasePath(databasePath); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityClientId and managedIdentityResourceId can be specified. * * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the client ID of Azure AD app to be used for AKS workload identity authentication. * if unset, {@link DefaultAzureCredentialBuilder * If both values are unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If none are set, the default value is null and Workload Identity authentication will not be attempted. 
* * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder workloadIdentityClientId(String clientId) { this.workloadIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityResourceId and managedIdentityClientId can be specified. * * @param resourceId the resource ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; } /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. 
*/ public DefaultAzureCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(String... additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(Arrays.asList(additionallyAllowedTenants)); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(additionallyAllowedTenants); return this; } /** * Creates new {@link DefaultAzureCredential} with the configured options set. * * @return a {@link DefaultAzureCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. 
*/ public DefaultAzureCredential build() { if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityResourceId and managedIdentityClientId can be specified.")); } if (!CoreUtils.isNullOrEmpty(additionallyAllowedTenants)) { identityClientOptions.setAdditionallyAllowedTenants(additionallyAllowedTenants); } return new DefaultAzureCredential(getCredentialsChain()); } private WorkloadIdentityCredential getWorkloadIdentityCredentialIfAvailable() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE); String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); if (!(CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || (CoreUtils.isNullOrEmpty(managedIdentityClientId) && CoreUtils.isNullOrEmpty(workloadIdentityClientId)) || CoreUtils.isNullOrEmpty(azureAuthorityHost))) { return new WorkloadIdentityCredential(managedIdentityClientId, tenantId, federatedTokenFilePath, identityClientOptions.setAuthorityHost(azureAuthorityHost).clone()); } return null; } }
class DefaultAzureCredentialBuilder extends CredentialBuilderBase<DefaultAzureCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(DefaultAzureCredentialBuilder.class); private String tenantId; private String managedIdentityClientId; private String workloadIdentityClientId; private String managedIdentityResourceId; private List<String> additionallyAllowedTenants = IdentityUtil .getAdditionalTenantsFromEnvironment(Configuration.getGlobalConfiguration().clone()); /** * Creates an instance of a DefaultAzureCredentialBuilder. */ public DefaultAzureCredentialBuilder() { this.identityClientOptions.setIdentityLogOptionsImpl(new IdentityLogOptionsImpl(true)); } /** * Sets the tenant id of the user to authenticate through the {@link DefaultAzureCredential}. If unset, the value * in the AZURE_TENANT_ID environment variable will be used. If neither is set, the default is null * and will authenticate users to their default tenant. * * @param tenantId the tenant ID to set. * @return An updated instance of this builder with the tenant id set as specified. */ public DefaultAzureCredentialBuilder tenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public DefaultAzureCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the KeePass database path to read the cached credentials of Azure toolkit for IntelliJ plugin. * The {@code databasePath} is required on Windows platform. For macOS and Linux platform native key chain / * key ring will be accessed respectively to retrieve the cached credentials. * * <p>This path can be located in the IntelliJ IDE. 
* Windows: File -&gt; Settings -&gt; Appearance &amp; Behavior -&gt; System Settings -&gt; Passwords. </p> * * @param databasePath the path to the KeePass database. * @throws IllegalArgumentException if {@code databasePath} is either not specified or is empty. * @return An updated instance of this builder with the KeePass database path set as specified. */ public DefaultAzureCredentialBuilder intelliJKeePassDatabasePath(String databasePath) { if (CoreUtils.isNullOrEmpty(databasePath)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The KeePass database path is either empty or not configured." + " Please configure it on the builder.")); } this.identityClientOptions.setIntelliJKeePassDatabasePath(databasePath); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityClientId and managedIdentityResourceId can be specified. * * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the client ID of Azure AD app to be used for AKS workload identity authentication. * if unset, {@link DefaultAzureCredentialBuilder * If both values are unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If none are set, the default value is null and Workload Identity authentication will not be attempted. 
* * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder workloadIdentityClientId(String clientId) { this.workloadIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityResourceId and managedIdentityClientId can be specified. * * @param resourceId the resource ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; } /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. 
*/ public DefaultAzureCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(String... additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(Arrays.asList(additionallyAllowedTenants)); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(additionallyAllowedTenants); return this; } /** * Creates new {@link DefaultAzureCredential} with the configured options set. * * @return a {@link DefaultAzureCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. 
*/ public DefaultAzureCredential build() { loadFallbackValuesFromEnvironment(); if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityResourceId and managedIdentityClientId can be specified.")); } if (!CoreUtils.isNullOrEmpty(additionallyAllowedTenants)) { identityClientOptions.setAdditionallyAllowedTenants(additionallyAllowedTenants); } return new DefaultAzureCredential(getCredentialsChain()); } private void loadFallbackValuesFromEnvironment() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); tenantId = CoreUtils.isNullOrEmpty(tenantId) ? configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID) : tenantId; managedIdentityClientId = CoreUtils.isNullOrEmpty(managedIdentityClientId) ? configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID) : managedIdentityClientId; } private WorkloadIdentityCredential getWorkloadIdentityCredentialIfAvailable() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE); String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); String clientId = CoreUtils.isNullOrEmpty(workloadIdentityClientId) ? managedIdentityClientId : workloadIdentityClientId; if (!(CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || CoreUtils.isNullOrEmpty(clientId) || CoreUtils.isNullOrEmpty(azureAuthorityHost))) { return new WorkloadIdentityCredential(tenantId, clientId, federatedTokenFilePath, identityClientOptions.setAuthorityHost(azureAuthorityHost).clone()); } return null; } }
Default value of managedIdentityClientId is value in AZURE_CLIENT_ID env var. User can override it by setting on the builder. so, the env var value check is captured automatically here.
private ArrayList<TokenCredential> getCredentialsChain() { WorkloadIdentityCredential workloadIdentityCredential = (CoreUtils.isNullOrEmpty(managedIdentityClientId) && CoreUtils.isNullOrEmpty(workloadIdentityClientId)) ? null : getWorkloadIdentityCredentialIfAvailable(); ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(workloadIdentityCredential != null ? 8 : 7); output.add(new EnvironmentCredential(identityClientOptions.clone())); if (workloadIdentityCredential != null) { output.add(workloadIdentityCredential); } output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions.clone())); output.add(new AzureDeveloperCliCredential(tenantId, identityClientOptions.clone())); output.add(new SharedTokenCacheCredential(null, IdentityConstants.DEVELOPER_SINGLE_SIGN_ON_ID, tenantId, identityClientOptions.clone())); output.add(new IntelliJCredential(tenantId, identityClientOptions.clone())); output.add(new AzureCliCredential(tenantId, identityClientOptions.clone())); output.add(new AzurePowerShellCredential(tenantId, identityClientOptions.clone())); return output; }
WorkloadIdentityCredential workloadIdentityCredential = (CoreUtils.isNullOrEmpty(managedIdentityClientId) && CoreUtils.isNullOrEmpty(workloadIdentityClientId)) ? null : getWorkloadIdentityCredentialIfAvailable();
private ArrayList<TokenCredential> getCredentialsChain() { WorkloadIdentityCredential workloadIdentityCredential = getWorkloadIdentityCredentialIfAvailable(); ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(workloadIdentityCredential != null ? 8 : 7); output.add(new EnvironmentCredential(identityClientOptions.clone())); if (workloadIdentityCredential != null) { output.add(workloadIdentityCredential); } output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions.clone())); output.add(new AzureDeveloperCliCredential(tenantId, identityClientOptions.clone())); output.add(new SharedTokenCacheCredential(null, IdentityConstants.DEVELOPER_SINGLE_SIGN_ON_ID, tenantId, identityClientOptions.clone())); output.add(new IntelliJCredential(tenantId, identityClientOptions.clone())); output.add(new AzureCliCredential(tenantId, identityClientOptions.clone())); output.add(new AzurePowerShellCredential(tenantId, identityClientOptions.clone())); return output; }
class DefaultAzureCredentialBuilder extends CredentialBuilderBase<DefaultAzureCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(DefaultAzureCredentialBuilder.class); private String tenantId; private String managedIdentityClientId; private String workloadIdentityClientId; private String managedIdentityResourceId; private List<String> additionallyAllowedTenants = IdentityUtil .getAdditionalTenantsFromEnvironment(Configuration.getGlobalConfiguration().clone()); /** * Creates an instance of a DefaultAzureCredentialBuilder. */ public DefaultAzureCredentialBuilder() { Configuration configuration = Configuration.getGlobalConfiguration().clone(); tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); managedIdentityClientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); this.identityClientOptions.setIdentityLogOptionsImpl(new IdentityLogOptionsImpl(true)); } /** * Sets the tenant id of the user to authenticate through the {@link DefaultAzureCredential}. If unset, the value * in the AZURE_TENANT_ID environment variable will be used. If neither is set, the default is null * and will authenticate users to their default tenant. * * @param tenantId the tenant ID to set. * @return An updated instance of this builder with the tenant id set as specified. */ public DefaultAzureCredentialBuilder tenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public DefaultAzureCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the KeePass database path to read the cached credentials of Azure toolkit for IntelliJ plugin. * The {@code databasePath} is required on Windows platform. 
For macOS and Linux platform native key chain / * key ring will be accessed respectively to retrieve the cached credentials. * * <p>This path can be located in the IntelliJ IDE. * Windows: File -&gt; Settings -&gt; Appearance &amp; Behavior -&gt; System Settings -&gt; Passwords. </p> * * @param databasePath the path to the KeePass database. * @throws IllegalArgumentException if {@code databasePath} is either not specified or is empty. * @return An updated instance of this builder with the KeePass database path set as specified. */ public DefaultAzureCredentialBuilder intelliJKeePassDatabasePath(String databasePath) { if (CoreUtils.isNullOrEmpty(databasePath)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The KeePass database path is either empty or not configured." + " Please configure it on the builder.")); } this.identityClientOptions.setIntelliJKeePassDatabasePath(databasePath); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityClientId and managedIdentityResourceId can be specified. * * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the client ID of Azure AD app to be used for AKS workload identity authentication. * if unset, {@link DefaultAzureCredentialBuilder * If both values are unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If none are set, the default value is null and Workload Identity authentication will not be attempted. 
* * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder workloadIdentityClientId(String clientId) { this.workloadIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityResourceId and managedIdentityClientId can be specified. * * @param resourceId the resource ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; } /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. 
*/ public DefaultAzureCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(String... additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(Arrays.asList(additionallyAllowedTenants)); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(additionallyAllowedTenants); return this; } /** * Creates new {@link DefaultAzureCredential} with the configured options set. * * @return a {@link DefaultAzureCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. 
*/ public DefaultAzureCredential build() { if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityResourceId and managedIdentityClientId can be specified.")); } if (!CoreUtils.isNullOrEmpty(additionallyAllowedTenants)) { identityClientOptions.setAdditionallyAllowedTenants(additionallyAllowedTenants); } return new DefaultAzureCredential(getCredentialsChain()); } private WorkloadIdentityCredential getWorkloadIdentityCredentialIfAvailable() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE); String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); if (!(CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || (CoreUtils.isNullOrEmpty(managedIdentityClientId) && CoreUtils.isNullOrEmpty(workloadIdentityClientId)) || CoreUtils.isNullOrEmpty(azureAuthorityHost))) { return new WorkloadIdentityCredential(managedIdentityClientId, tenantId, federatedTokenFilePath, identityClientOptions.setAuthorityHost(azureAuthorityHost).clone()); } return null; } }
class DefaultAzureCredentialBuilder extends CredentialBuilderBase<DefaultAzureCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(DefaultAzureCredentialBuilder.class); private String tenantId; private String managedIdentityClientId; private String workloadIdentityClientId; private String managedIdentityResourceId; private List<String> additionallyAllowedTenants = IdentityUtil .getAdditionalTenantsFromEnvironment(Configuration.getGlobalConfiguration().clone()); /** * Creates an instance of a DefaultAzureCredentialBuilder. */ public DefaultAzureCredentialBuilder() { this.identityClientOptions.setIdentityLogOptionsImpl(new IdentityLogOptionsImpl(true)); } /** * Sets the tenant id of the user to authenticate through the {@link DefaultAzureCredential}. If unset, the value * in the AZURE_TENANT_ID environment variable will be used. If neither is set, the default is null * and will authenticate users to their default tenant. * * @param tenantId the tenant ID to set. * @return An updated instance of this builder with the tenant id set as specified. */ public DefaultAzureCredentialBuilder tenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public DefaultAzureCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the KeePass database path to read the cached credentials of Azure toolkit for IntelliJ plugin. * The {@code databasePath} is required on Windows platform. For macOS and Linux platform native key chain / * key ring will be accessed respectively to retrieve the cached credentials. * * <p>This path can be located in the IntelliJ IDE. 
* Windows: File -&gt; Settings -&gt; Appearance &amp; Behavior -&gt; System Settings -&gt; Passwords. </p> * * @param databasePath the path to the KeePass database. * @throws IllegalArgumentException if {@code databasePath} is either not specified or is empty. * @return An updated instance of this builder with the KeePass database path set as specified. */ public DefaultAzureCredentialBuilder intelliJKeePassDatabasePath(String databasePath) { if (CoreUtils.isNullOrEmpty(databasePath)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The KeePass database path is either empty or not configured." + " Please configure it on the builder.")); } this.identityClientOptions.setIntelliJKeePassDatabasePath(databasePath); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityClientId and managedIdentityResourceId can be specified. * * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the client ID of Azure AD app to be used for AKS workload identity authentication. * if unset, {@link DefaultAzureCredentialBuilder * If both values are unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If none are set, the default value is null and Workload Identity authentication will not be attempted. 
* * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder workloadIdentityClientId(String clientId) { this.workloadIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityResourceId and managedIdentityClientId can be specified. * * @param resourceId the resource ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; } /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. 
*/ public DefaultAzureCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(String... additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(Arrays.asList(additionallyAllowedTenants)); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(additionallyAllowedTenants); return this; } /** * Creates new {@link DefaultAzureCredential} with the configured options set. * * @return a {@link DefaultAzureCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. 
*/ public DefaultAzureCredential build() { loadFallbackValuesFromEnvironment(); if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityResourceId and managedIdentityClientId can be specified.")); } if (!CoreUtils.isNullOrEmpty(additionallyAllowedTenants)) { identityClientOptions.setAdditionallyAllowedTenants(additionallyAllowedTenants); } return new DefaultAzureCredential(getCredentialsChain()); } private void loadFallbackValuesFromEnvironment() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); tenantId = CoreUtils.isNullOrEmpty(tenantId) ? configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID) : tenantId; managedIdentityClientId = CoreUtils.isNullOrEmpty(managedIdentityClientId) ? configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID) : managedIdentityClientId; } private WorkloadIdentityCredential getWorkloadIdentityCredentialIfAvailable() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE); String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); String clientId = CoreUtils.isNullOrEmpty(workloadIdentityClientId) ? managedIdentityClientId : workloadIdentityClientId; if (!(CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || CoreUtils.isNullOrEmpty(clientId) || CoreUtils.isNullOrEmpty(azureAuthorityHost))) { return new WorkloadIdentityCredential(tenantId, clientId, federatedTokenFilePath, identityClientOptions.setAuthorityHost(azureAuthorityHost).clone()); } return null; } }
Forgive me if I'm just failing to parse Java here, but is the value of `workloadIdentityClientId` ever used?
private WorkloadIdentityCredential getWorkloadIdentityCredentialIfAvailable() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE); String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); if (!(CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || (CoreUtils.isNullOrEmpty(managedIdentityClientId) && CoreUtils.isNullOrEmpty(workloadIdentityClientId)) || CoreUtils.isNullOrEmpty(azureAuthorityHost))) { return new WorkloadIdentityCredential(managedIdentityClientId, tenantId, federatedTokenFilePath, identityClientOptions.setAuthorityHost(azureAuthorityHost).clone()); } return null; }
return new WorkloadIdentityCredential(managedIdentityClientId, tenantId, federatedTokenFilePath, identityClientOptions.setAuthorityHost(azureAuthorityHost).clone());
private WorkloadIdentityCredential getWorkloadIdentityCredentialIfAvailable() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE); String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); String clientId = CoreUtils.isNullOrEmpty(workloadIdentityClientId) ? managedIdentityClientId : workloadIdentityClientId; if (!(CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || CoreUtils.isNullOrEmpty(clientId) || CoreUtils.isNullOrEmpty(azureAuthorityHost))) { return new WorkloadIdentityCredential(tenantId, clientId, federatedTokenFilePath, identityClientOptions.setAuthorityHost(azureAuthorityHost).clone()); } return null; }
class DefaultAzureCredentialBuilder extends CredentialBuilderBase<DefaultAzureCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(DefaultAzureCredentialBuilder.class); private String tenantId; private String managedIdentityClientId; private String workloadIdentityClientId; private String managedIdentityResourceId; private List<String> additionallyAllowedTenants = IdentityUtil .getAdditionalTenantsFromEnvironment(Configuration.getGlobalConfiguration().clone()); /** * Creates an instance of a DefaultAzureCredentialBuilder. */ public DefaultAzureCredentialBuilder() { Configuration configuration = Configuration.getGlobalConfiguration().clone(); tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); managedIdentityClientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); this.identityClientOptions.setIdentityLogOptionsImpl(new IdentityLogOptionsImpl(true)); } /** * Sets the tenant id of the user to authenticate through the {@link DefaultAzureCredential}. If unset, the value * in the AZURE_TENANT_ID environment variable will be used. If neither is set, the default is null * and will authenticate users to their default tenant. * * @param tenantId the tenant ID to set. * @return An updated instance of this builder with the tenant id set as specified. */ public DefaultAzureCredentialBuilder tenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public DefaultAzureCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the KeePass database path to read the cached credentials of Azure toolkit for IntelliJ plugin. * The {@code databasePath} is required on Windows platform. 
For macOS and Linux platform native key chain / * key ring will be accessed respectively to retrieve the cached credentials. * * <p>This path can be located in the IntelliJ IDE. * Windows: File -&gt; Settings -&gt; Appearance &amp; Behavior -&gt; System Settings -&gt; Passwords. </p> * * @param databasePath the path to the KeePass database. * @throws IllegalArgumentException if {@code databasePath} is either not specified or is empty. * @return An updated instance of this builder with the KeePass database path set as specified. */ public DefaultAzureCredentialBuilder intelliJKeePassDatabasePath(String databasePath) { if (CoreUtils.isNullOrEmpty(databasePath)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The KeePass database path is either empty or not configured." + " Please configure it on the builder.")); } this.identityClientOptions.setIntelliJKeePassDatabasePath(databasePath); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityClientId and managedIdentityResourceId can be specified. * * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the client ID of Azure AD app to be used for AKS workload identity authentication. * if unset, {@link DefaultAzureCredentialBuilder * If both values are unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If none are set, the default value is null and Workload Identity authentication will not be attempted. 
* * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder workloadIdentityClientId(String clientId) { this.workloadIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityResourceId and managedIdentityClientId can be specified. * * @param resourceId the resource ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; } /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. 
*/ public DefaultAzureCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(String... additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(Arrays.asList(additionallyAllowedTenants)); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(additionallyAllowedTenants); return this; } /** * Creates new {@link DefaultAzureCredential} with the configured options set. * * @return a {@link DefaultAzureCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. 
*/ public DefaultAzureCredential build() { if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityResourceId and managedIdentityClientId can be specified.")); } if (!CoreUtils.isNullOrEmpty(additionallyAllowedTenants)) { identityClientOptions.setAdditionallyAllowedTenants(additionallyAllowedTenants); } return new DefaultAzureCredential(getCredentialsChain()); } private ArrayList<TokenCredential> getCredentialsChain() { WorkloadIdentityCredential workloadIdentityCredential = (CoreUtils.isNullOrEmpty(managedIdentityClientId) && CoreUtils.isNullOrEmpty(workloadIdentityClientId)) ? null : getWorkloadIdentityCredentialIfAvailable(); ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(workloadIdentityCredential != null ? 8 : 7); output.add(new EnvironmentCredential(identityClientOptions.clone())); if (workloadIdentityCredential != null) { output.add(workloadIdentityCredential); } output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions.clone())); output.add(new AzureDeveloperCliCredential(tenantId, identityClientOptions.clone())); output.add(new SharedTokenCacheCredential(null, IdentityConstants.DEVELOPER_SINGLE_SIGN_ON_ID, tenantId, identityClientOptions.clone())); output.add(new IntelliJCredential(tenantId, identityClientOptions.clone())); output.add(new AzureCliCredential(tenantId, identityClientOptions.clone())); output.add(new AzurePowerShellCredential(tenantId, identityClientOptions.clone())); return output; } }
class DefaultAzureCredentialBuilder extends CredentialBuilderBase<DefaultAzureCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(DefaultAzureCredentialBuilder.class); private String tenantId; private String managedIdentityClientId; private String workloadIdentityClientId; private String managedIdentityResourceId; private List<String> additionallyAllowedTenants = IdentityUtil .getAdditionalTenantsFromEnvironment(Configuration.getGlobalConfiguration().clone()); /** * Creates an instance of a DefaultAzureCredentialBuilder. */ public DefaultAzureCredentialBuilder() { this.identityClientOptions.setIdentityLogOptionsImpl(new IdentityLogOptionsImpl(true)); } /** * Sets the tenant id of the user to authenticate through the {@link DefaultAzureCredential}. If unset, the value * in the AZURE_TENANT_ID environment variable will be used. If neither is set, the default is null * and will authenticate users to their default tenant. * * @param tenantId the tenant ID to set. * @return An updated instance of this builder with the tenant id set as specified. */ public DefaultAzureCredentialBuilder tenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public DefaultAzureCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the KeePass database path to read the cached credentials of Azure toolkit for IntelliJ plugin. * The {@code databasePath} is required on Windows platform. For macOS and Linux platform native key chain / * key ring will be accessed respectively to retrieve the cached credentials. * * <p>This path can be located in the IntelliJ IDE. 
* Windows: File -&gt; Settings -&gt; Appearance &amp; Behavior -&gt; System Settings -&gt; Passwords. </p> * * @param databasePath the path to the KeePass database. * @throws IllegalArgumentException if {@code databasePath} is either not specified or is empty. * @return An updated instance of this builder with the KeePass database path set as specified. */ public DefaultAzureCredentialBuilder intelliJKeePassDatabasePath(String databasePath) { if (CoreUtils.isNullOrEmpty(databasePath)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The KeePass database path is either empty or not configured." + " Please configure it on the builder.")); } this.identityClientOptions.setIntelliJKeePassDatabasePath(databasePath); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityClientId and managedIdentityResourceId can be specified. * * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the client ID of Azure AD app to be used for AKS workload identity authentication. * if unset, {@link DefaultAzureCredentialBuilder * If both values are unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If none are set, the default value is null and Workload Identity authentication will not be attempted. 
* * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder workloadIdentityClientId(String clientId) { this.workloadIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityResourceId and managedIdentityClientId can be specified. * * @param resourceId the resource ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; } /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. 
*/ public DefaultAzureCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(String... additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(Arrays.asList(additionallyAllowedTenants)); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(additionallyAllowedTenants); return this; } /** * Creates new {@link DefaultAzureCredential} with the configured options set. * * @return a {@link DefaultAzureCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. 
*/ public DefaultAzureCredential build() { loadFallbackValuesFromEnvironment(); if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityResourceId and managedIdentityClientId can be specified.")); } if (!CoreUtils.isNullOrEmpty(additionallyAllowedTenants)) { identityClientOptions.setAdditionallyAllowedTenants(additionallyAllowedTenants); } return new DefaultAzureCredential(getCredentialsChain()); } private void loadFallbackValuesFromEnvironment() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); tenantId = CoreUtils.isNullOrEmpty(tenantId) ? configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID) : tenantId; managedIdentityClientId = CoreUtils.isNullOrEmpty(managedIdentityClientId) ? configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID) : managedIdentityClientId; } private ArrayList<TokenCredential> getCredentialsChain() { WorkloadIdentityCredential workloadIdentityCredential = getWorkloadIdentityCredentialIfAvailable(); ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(workloadIdentityCredential != null ? 
8 : 7); output.add(new EnvironmentCredential(identityClientOptions.clone())); if (workloadIdentityCredential != null) { output.add(workloadIdentityCredential); } output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions.clone())); output.add(new AzureDeveloperCliCredential(tenantId, identityClientOptions.clone())); output.add(new SharedTokenCacheCredential(null, IdentityConstants.DEVELOPER_SINGLE_SIGN_ON_ID, tenantId, identityClientOptions.clone())); output.add(new IntelliJCredential(tenantId, identityClientOptions.clone())); output.add(new AzureCliCredential(tenantId, identityClientOptions.clone())); output.add(new AzurePowerShellCredential(tenantId, identityClientOptions.clone())); return output; } }
Yeah, that logic is handled behind the scenes, which compromises readability here. I've changed the code to handle it in this method instead, making it more intuitive and readable.
/**
 * Builds a {@link WorkloadIdentityCredential} when the environment supplies everything workload
 * identity needs; otherwise returns {@code null} so the credential chain can skip it.
 *
 * <p>Required inputs (all read from configuration unless overridden on the builder): tenant id,
 * federated token file path, a client id, and the authority host.</p>
 *
 * @return a configured {@link WorkloadIdentityCredential}, or {@code null} if any required value
 *     is missing.
 */
private WorkloadIdentityCredential getWorkloadIdentityCredentialIfAvailable() {
    // Prefer the Configuration explicitly set on the client options; fall back to a clone of the
    // global configuration so reads here don't mutate shared state.
    Configuration configuration = identityClientOptions.getConfiguration() == null
        ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration();
    String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID);
    String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE);
    String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST);
    // The workload-identity-specific client id takes precedence over the managed identity client
    // id. Previously the availability check accepted workloadIdentityClientId but the constructor
    // only received managedIdentityClientId (possibly null); resolving a single clientId up front
    // fixes that and keeps the check and the constructor in agreement.
    String clientId = CoreUtils.isNullOrEmpty(workloadIdentityClientId)
        ? managedIdentityClientId : workloadIdentityClientId;
    // Workload identity is only viable when all four inputs are present.
    if (!(CoreUtils.isNullOrEmpty(tenantId)
        || CoreUtils.isNullOrEmpty(federatedTokenFilePath)
        || CoreUtils.isNullOrEmpty(clientId)
        || CoreUtils.isNullOrEmpty(azureAuthorityHost))) {
        return new WorkloadIdentityCredential(tenantId, clientId, federatedTokenFilePath,
            identityClientOptions.setAuthorityHost(azureAuthorityHost).clone());
    }
    return null;
}
return new WorkloadIdentityCredential(managedIdentityClientId, tenantId, federatedTokenFilePath, identityClientOptions.setAuthorityHost(azureAuthorityHost).clone());
/**
 * Builds a {@link WorkloadIdentityCredential} when the environment supplies everything workload
 * identity needs; otherwise returns {@code null} so the credential chain can skip it.
 *
 * @return a configured {@link WorkloadIdentityCredential}, or {@code null} if the tenant id,
 *     federated token file path, client id, or authority host is missing.
 */
private WorkloadIdentityCredential getWorkloadIdentityCredentialIfAvailable() {
    // Prefer the Configuration explicitly set on the client options; fall back to a clone of the
    // global configuration so reads here don't mutate shared state.
    Configuration configuration = identityClientOptions.getConfiguration() == null
        ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration();
    String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID);
    String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE);
    String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST);
    // The workload-identity-specific client id takes precedence over the managed identity one.
    String clientId = CoreUtils.isNullOrEmpty(workloadIdentityClientId)
        ? managedIdentityClientId : workloadIdentityClientId;
    // Workload identity is only viable when all four inputs are present.
    if (!(CoreUtils.isNullOrEmpty(tenantId)
        || CoreUtils.isNullOrEmpty(federatedTokenFilePath)
        || CoreUtils.isNullOrEmpty(clientId)
        || CoreUtils.isNullOrEmpty(azureAuthorityHost))) {
        return new WorkloadIdentityCredential(tenantId, clientId, federatedTokenFilePath,
            identityClientOptions.setAuthorityHost(azureAuthorityHost).clone());
    }
    return null;
}
class DefaultAzureCredentialBuilder extends CredentialBuilderBase<DefaultAzureCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(DefaultAzureCredentialBuilder.class); private String tenantId; private String managedIdentityClientId; private String workloadIdentityClientId; private String managedIdentityResourceId; private List<String> additionallyAllowedTenants = IdentityUtil .getAdditionalTenantsFromEnvironment(Configuration.getGlobalConfiguration().clone()); /** * Creates an instance of a DefaultAzureCredentialBuilder. */ public DefaultAzureCredentialBuilder() { Configuration configuration = Configuration.getGlobalConfiguration().clone(); tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); managedIdentityClientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); this.identityClientOptions.setIdentityLogOptionsImpl(new IdentityLogOptionsImpl(true)); } /** * Sets the tenant id of the user to authenticate through the {@link DefaultAzureCredential}. If unset, the value * in the AZURE_TENANT_ID environment variable will be used. If neither is set, the default is null * and will authenticate users to their default tenant. * * @param tenantId the tenant ID to set. * @return An updated instance of this builder with the tenant id set as specified. */ public DefaultAzureCredentialBuilder tenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public DefaultAzureCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the KeePass database path to read the cached credentials of Azure toolkit for IntelliJ plugin. * The {@code databasePath} is required on Windows platform. 
For macOS and Linux platform native key chain / * key ring will be accessed respectively to retrieve the cached credentials. * * <p>This path can be located in the IntelliJ IDE. * Windows: File -&gt; Settings -&gt; Appearance &amp; Behavior -&gt; System Settings -&gt; Passwords. </p> * * @param databasePath the path to the KeePass database. * @throws IllegalArgumentException if {@code databasePath} is either not specified or is empty. * @return An updated instance of this builder with the KeePass database path set as specified. */ public DefaultAzureCredentialBuilder intelliJKeePassDatabasePath(String databasePath) { if (CoreUtils.isNullOrEmpty(databasePath)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The KeePass database path is either empty or not configured." + " Please configure it on the builder.")); } this.identityClientOptions.setIntelliJKeePassDatabasePath(databasePath); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityClientId and managedIdentityResourceId can be specified. * * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the client ID of Azure AD app to be used for AKS workload identity authentication. * if unset, {@link DefaultAzureCredentialBuilder * If both values are unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If none are set, the default value is null and Workload Identity authentication will not be attempted. 
* * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder workloadIdentityClientId(String clientId) { this.workloadIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityResourceId and managedIdentityClientId can be specified. * * @param resourceId the resource ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; } /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. 
*/ public DefaultAzureCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(String... additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(Arrays.asList(additionallyAllowedTenants)); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(additionallyAllowedTenants); return this; } /** * Creates new {@link DefaultAzureCredential} with the configured options set. * * @return a {@link DefaultAzureCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. 
*/ public DefaultAzureCredential build() { if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityResourceId and managedIdentityClientId can be specified.")); } if (!CoreUtils.isNullOrEmpty(additionallyAllowedTenants)) { identityClientOptions.setAdditionallyAllowedTenants(additionallyAllowedTenants); } return new DefaultAzureCredential(getCredentialsChain()); } private ArrayList<TokenCredential> getCredentialsChain() { WorkloadIdentityCredential workloadIdentityCredential = (CoreUtils.isNullOrEmpty(managedIdentityClientId) && CoreUtils.isNullOrEmpty(workloadIdentityClientId)) ? null : getWorkloadIdentityCredentialIfAvailable(); ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(workloadIdentityCredential != null ? 8 : 7); output.add(new EnvironmentCredential(identityClientOptions.clone())); if (workloadIdentityCredential != null) { output.add(workloadIdentityCredential); } output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions.clone())); output.add(new AzureDeveloperCliCredential(tenantId, identityClientOptions.clone())); output.add(new SharedTokenCacheCredential(null, IdentityConstants.DEVELOPER_SINGLE_SIGN_ON_ID, tenantId, identityClientOptions.clone())); output.add(new IntelliJCredential(tenantId, identityClientOptions.clone())); output.add(new AzureCliCredential(tenantId, identityClientOptions.clone())); output.add(new AzurePowerShellCredential(tenantId, identityClientOptions.clone())); return output; } }
class DefaultAzureCredentialBuilder extends CredentialBuilderBase<DefaultAzureCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(DefaultAzureCredentialBuilder.class); private String tenantId; private String managedIdentityClientId; private String workloadIdentityClientId; private String managedIdentityResourceId; private List<String> additionallyAllowedTenants = IdentityUtil .getAdditionalTenantsFromEnvironment(Configuration.getGlobalConfiguration().clone()); /** * Creates an instance of a DefaultAzureCredentialBuilder. */ public DefaultAzureCredentialBuilder() { this.identityClientOptions.setIdentityLogOptionsImpl(new IdentityLogOptionsImpl(true)); } /** * Sets the tenant id of the user to authenticate through the {@link DefaultAzureCredential}. If unset, the value * in the AZURE_TENANT_ID environment variable will be used. If neither is set, the default is null * and will authenticate users to their default tenant. * * @param tenantId the tenant ID to set. * @return An updated instance of this builder with the tenant id set as specified. */ public DefaultAzureCredentialBuilder tenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public DefaultAzureCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the KeePass database path to read the cached credentials of Azure toolkit for IntelliJ plugin. * The {@code databasePath} is required on Windows platform. For macOS and Linux platform native key chain / * key ring will be accessed respectively to retrieve the cached credentials. * * <p>This path can be located in the IntelliJ IDE. 
* Windows: File -&gt; Settings -&gt; Appearance &amp; Behavior -&gt; System Settings -&gt; Passwords. </p> * * @param databasePath the path to the KeePass database. * @throws IllegalArgumentException if {@code databasePath} is either not specified or is empty. * @return An updated instance of this builder with the KeePass database path set as specified. */ public DefaultAzureCredentialBuilder intelliJKeePassDatabasePath(String databasePath) { if (CoreUtils.isNullOrEmpty(databasePath)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The KeePass database path is either empty or not configured." + " Please configure it on the builder.")); } this.identityClientOptions.setIntelliJKeePassDatabasePath(databasePath); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityClientId and managedIdentityResourceId can be specified. * * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the client ID of Azure AD app to be used for AKS workload identity authentication. * if unset, {@link DefaultAzureCredentialBuilder * If both values are unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If none are set, the default value is null and Workload Identity authentication will not be attempted. 
* * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder workloadIdentityClientId(String clientId) { this.workloadIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityResourceId and managedIdentityClientId can be specified. * * @param resourceId the resource ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; } /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. 
*/ public DefaultAzureCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(String... additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(Arrays.asList(additionallyAllowedTenants)); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(additionallyAllowedTenants); return this; } /** * Creates new {@link DefaultAzureCredential} with the configured options set. * * @return a {@link DefaultAzureCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. 
*/ public DefaultAzureCredential build() { loadFallbackValuesFromEnvironment(); if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityResourceId and managedIdentityClientId can be specified.")); } if (!CoreUtils.isNullOrEmpty(additionallyAllowedTenants)) { identityClientOptions.setAdditionallyAllowedTenants(additionallyAllowedTenants); } return new DefaultAzureCredential(getCredentialsChain()); } private void loadFallbackValuesFromEnvironment() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); tenantId = CoreUtils.isNullOrEmpty(tenantId) ? configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID) : tenantId; managedIdentityClientId = CoreUtils.isNullOrEmpty(managedIdentityClientId) ? configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID) : managedIdentityClientId; } private ArrayList<TokenCredential> getCredentialsChain() { WorkloadIdentityCredential workloadIdentityCredential = getWorkloadIdentityCredentialIfAvailable(); ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(workloadIdentityCredential != null ? 
8 : 7); output.add(new EnvironmentCredential(identityClientOptions.clone())); if (workloadIdentityCredential != null) { output.add(workloadIdentityCredential); } output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions.clone())); output.add(new AzureDeveloperCliCredential(tenantId, identityClientOptions.clone())); output.add(new SharedTokenCacheCredential(null, IdentityConstants.DEVELOPER_SINGLE_SIGN_ON_ID, tenantId, identityClientOptions.clone())); output.add(new IntelliJCredential(tenantId, identityClientOptions.clone())); output.add(new AzureCliCredential(tenantId, identityClientOptions.clone())); output.add(new AzurePowerShellCredential(tenantId, identityClientOptions.clone())); return output; } }
Right — in the case of more than one session error, if we see multiple entries of the first log and at least one entry of the second log, then it's good. It looks like one of the session errors that results in the processor restart may cancel the merge operator, which could, in turn, cancel some errors enqueued to the bounded pool that are yet to run; this is why I said "at least one" second-log entry. (This statement is based on code scanning — I didn't try to simulate it. It's something to double-check in the future when we investigate the effect of handling the amqp-error at the _RetryWhen_ level vs. the Processor level; see this [thread](https://github.com/Azure/azure-sdk-for-java/pull/33386#discussion_r1100499816).)
/**
 * Gets a session receive link that has reached the ACTIVE endpoint state, creating one if this
 * manager was not constructed with an existing link.
 *
 * <p>Link creation is retried: timeout-flavored failures are retried after a fixed delay, while
 * all other failures terminate the sequence (re-emitted on the bounded-elastic scheduler).</p>
 *
 * @return a {@link Mono} that emits an active {@link ServiceBusReceiveLink}, or errors if the
 *     link cannot be established.
 */
Mono<ServiceBusReceiveLink> getActiveLink() {
    // A link supplied at construction time is used as-is without waiting for ACTIVE again.
    if (this.receiveLink != null) {
        return Mono.just(this.receiveLink);
    }
    // defer(): build a fresh link on every subscription so each retry attempt gets a new link.
    return Mono.defer(() -> createSessionReceiveLink()
        .flatMap(link -> link.getEndpointStates()
            .filter(e -> e == AmqpEndpointState.ACTIVE)
            .next()
            // Endpoint states completing without ever turning ACTIVE means the link closed
            // prematurely; surface that as a retriable AmqpException.
            .switchIfEmpty(Mono.error(() -> new AmqpException(true,
                "Session receive link completed without being active", null)))
            .timeout(operationTimeout)
            .then(Mono.just(link))))
        .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> {
            final Throwable failure = signal.failure();
            LOGGER.atInfo()
                .addKeyValue(ENTITY_PATH_KEY, entityPath)
                .addKeyValue("attempt", signal.totalRetriesInARow())
                .log("Error occurred while getting unnamed session.", failure);
            if (isDisposed.get()) {
                // No retry once the manager is disposed; wrap the failure for callers.
                return Mono.<Long>error(new AmqpException(false,
                    "SessionManager is already disposed.", failure, getErrorContext()));
            } else if (failure instanceof TimeoutException) {
                // Reactor timeout: back off before the next accept-session attempt.
                return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION);
            } else if (failure instanceof AmqpException
                && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) {
                // Service-side timeout (e.g. no session available): same back-off.
                return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION);
            } else {
                // Non-timeout failures are terminal; publish on boundedElastic so the error is
                // delivered off the AMQP connection thread.
                return Mono.<Long>error(failure).publishOn(Schedulers.boundedElastic());
            }
        })));
}
return Mono.<Long>error(failure).publishOn(Schedulers.boundedElastic());
/**
 * Gets a session receive link that has reached the ACTIVE endpoint state, creating one if this
 * manager was not constructed with an existing link.
 *
 * <p>Link creation is retried: timeout-flavored failures are retried after a fixed delay, while
 * all other failures terminate the sequence (re-emitted on the bounded-elastic scheduler). A
 * tracking id correlates the "unable to acquire" log with the later error-emission log, since
 * the hop to boundedElastic separates the two in time.</p>
 *
 * @return a {@link Mono} that emits an active {@link ServiceBusReceiveLink}, or errors if the
 *     link cannot be established.
 */
Mono<ServiceBusReceiveLink> getActiveLink() {
    // A link supplied at construction time is used as-is without waiting for ACTIVE again.
    if (this.receiveLink != null) {
        return Mono.just(this.receiveLink);
    }
    // defer(): build a fresh link on every subscription so each retry attempt gets a new link.
    return Mono.defer(() -> createSessionReceiveLink()
        .flatMap(link -> link.getEndpointStates()
            .filter(e -> e == AmqpEndpointState.ACTIVE)
            .next()
            // Endpoint states completing without ever turning ACTIVE means the link closed
            // prematurely; surface that as a retriable AmqpException.
            .switchIfEmpty(Mono.error(() -> new AmqpException(true,
                "Session receive link completed without being active", null)))
            .timeout(operationTimeout)
            .then(Mono.just(link))))
        .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> {
            final Throwable failure = signal.failure();
            LOGGER.atInfo()
                .addKeyValue(ENTITY_PATH_KEY, entityPath)
                .addKeyValue("attempt", signal.totalRetriesInARow())
                .log("Error occurred while getting unnamed session.", failure);
            if (isDisposed.get()) {
                // No retry once the manager is disposed; wrap the failure for callers.
                return Mono.<Long>error(new AmqpException(false,
                    "SessionManager is already disposed.", failure, getErrorContext()));
            } else if (failure instanceof TimeoutException) {
                // Reactor timeout: back off before the next accept-session attempt.
                return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION);
            } else if (failure instanceof AmqpException
                && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) {
                // Service-side timeout (e.g. no session available): same back-off.
                return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION);
            } else {
                // Terminal failure: tag with a tracking id so the log below and the later
                // emission log (possibly on a different thread) can be correlated.
                final long id = System.nanoTime();
                LOGGER.atInfo()
                    .addKeyValue(TRACKING_ID_KEY, id)
                    .log("Unable to acquire new session.", failure);
                // publishOn(boundedElastic): deliver the error off the AMQP connection thread.
                return Mono.<Long>error(failure)
                    .publishOn(Schedulers.boundedElastic())
                    .doOnError(e -> LOGGER.atInfo()
                        .addKeyValue(TRACKING_ID_KEY, id)
                        .log("Emitting the error signal received for session acquire attempt.", e)
                    );
            }
        })));
}
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. */ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? 
receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. * * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. 
*/ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. 
* * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. */ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final String TRACKING_ID_KEY = "trackingId"; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. 
*/ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. 
* * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. */ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); 
sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. * * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. 
*/ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
```suggestion return new WorkloadIdentityCredential(tenantId, clientId, federatedTokenFilePath, identityClientOptions.setAuthorityHost(azureAuthorityHost).clone()); ``` Should also have a test case that would catch this by validating the tenant and client IDs. The client ID especially needs test coverage because there's 3 ways to set it and all of them can be used simultaneously.
private WorkloadIdentityCredential getWorkloadIdentityCredentialIfAvailable() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE); String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); String clientId = CoreUtils.isNullOrEmpty(workloadIdentityClientId) ? managedIdentityClientId : workloadIdentityClientId; if (!(CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || CoreUtils.isNullOrEmpty(clientId) || CoreUtils.isNullOrEmpty(azureAuthorityHost))) { return new WorkloadIdentityCredential(clientId, tenantId, federatedTokenFilePath, identityClientOptions.setAuthorityHost(azureAuthorityHost).clone()); } return null; }
return new WorkloadIdentityCredential(clientId, tenantId, federatedTokenFilePath, identityClientOptions.setAuthorityHost(azureAuthorityHost).clone());
private WorkloadIdentityCredential getWorkloadIdentityCredentialIfAvailable() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE); String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); String clientId = CoreUtils.isNullOrEmpty(workloadIdentityClientId) ? managedIdentityClientId : workloadIdentityClientId; if (!(CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || CoreUtils.isNullOrEmpty(clientId) || CoreUtils.isNullOrEmpty(azureAuthorityHost))) { return new WorkloadIdentityCredential(tenantId, clientId, federatedTokenFilePath, identityClientOptions.setAuthorityHost(azureAuthorityHost).clone()); } return null; }
class DefaultAzureCredentialBuilder extends CredentialBuilderBase<DefaultAzureCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(DefaultAzureCredentialBuilder.class); private String tenantId; private String managedIdentityClientId; private String workloadIdentityClientId; private String managedIdentityResourceId; private List<String> additionallyAllowedTenants = IdentityUtil .getAdditionalTenantsFromEnvironment(Configuration.getGlobalConfiguration().clone()); /** * Creates an instance of a DefaultAzureCredentialBuilder. */ public DefaultAzureCredentialBuilder() { Configuration configuration = Configuration.getGlobalConfiguration().clone(); tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); managedIdentityClientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); this.identityClientOptions.setIdentityLogOptionsImpl(new IdentityLogOptionsImpl(true)); } /** * Sets the tenant id of the user to authenticate through the {@link DefaultAzureCredential}. If unset, the value * in the AZURE_TENANT_ID environment variable will be used. If neither is set, the default is null * and will authenticate users to their default tenant. * * @param tenantId the tenant ID to set. * @return An updated instance of this builder with the tenant id set as specified. */ public DefaultAzureCredentialBuilder tenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public DefaultAzureCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the KeePass database path to read the cached credentials of Azure toolkit for IntelliJ plugin. * The {@code databasePath} is required on Windows platform. 
For macOS and Linux platform native key chain / * key ring will be accessed respectively to retrieve the cached credentials. * * <p>This path can be located in the IntelliJ IDE. * Windows: File -&gt; Settings -&gt; Appearance &amp; Behavior -&gt; System Settings -&gt; Passwords. </p> * * @param databasePath the path to the KeePass database. * @throws IllegalArgumentException if {@code databasePath} is either not specified or is empty. * @return An updated instance of this builder with the KeePass database path set as specified. */ public DefaultAzureCredentialBuilder intelliJKeePassDatabasePath(String databasePath) { if (CoreUtils.isNullOrEmpty(databasePath)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The KeePass database path is either empty or not configured." + " Please configure it on the builder.")); } this.identityClientOptions.setIntelliJKeePassDatabasePath(databasePath); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityClientId and managedIdentityResourceId can be specified. * * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the client ID of Azure AD app to be used for AKS workload identity authentication. * if unset, {@link DefaultAzureCredentialBuilder * If both values are unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If none are set, the default value is null and Workload Identity authentication will not be attempted. 
* * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder workloadIdentityClientId(String clientId) { this.workloadIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityResourceId and managedIdentityClientId can be specified. * * @param resourceId the resource ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; } /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. 
*/ public DefaultAzureCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(String... additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(Arrays.asList(additionallyAllowedTenants)); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(additionallyAllowedTenants); return this; } /** * Creates new {@link DefaultAzureCredential} with the configured options set. * * @return a {@link DefaultAzureCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. 
*/ public DefaultAzureCredential build() { if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityResourceId and managedIdentityClientId can be specified.")); } if (!CoreUtils.isNullOrEmpty(additionallyAllowedTenants)) { identityClientOptions.setAdditionallyAllowedTenants(additionallyAllowedTenants); } return new DefaultAzureCredential(getCredentialsChain()); } private ArrayList<TokenCredential> getCredentialsChain() { WorkloadIdentityCredential workloadIdentityCredential = (CoreUtils.isNullOrEmpty(managedIdentityClientId) && CoreUtils.isNullOrEmpty(workloadIdentityClientId)) ? null : getWorkloadIdentityCredentialIfAvailable(); ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(workloadIdentityCredential != null ? 8 : 7); output.add(new EnvironmentCredential(identityClientOptions.clone())); if (workloadIdentityCredential != null) { output.add(workloadIdentityCredential); } output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions.clone())); output.add(new AzureDeveloperCliCredential(tenantId, identityClientOptions.clone())); output.add(new SharedTokenCacheCredential(null, IdentityConstants.DEVELOPER_SINGLE_SIGN_ON_ID, tenantId, identityClientOptions.clone())); output.add(new IntelliJCredential(tenantId, identityClientOptions.clone())); output.add(new AzureCliCredential(tenantId, identityClientOptions.clone())); output.add(new AzurePowerShellCredential(tenantId, identityClientOptions.clone())); return output; } }
class DefaultAzureCredentialBuilder extends CredentialBuilderBase<DefaultAzureCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(DefaultAzureCredentialBuilder.class); private String tenantId; private String managedIdentityClientId; private String workloadIdentityClientId; private String managedIdentityResourceId; private List<String> additionallyAllowedTenants = IdentityUtil .getAdditionalTenantsFromEnvironment(Configuration.getGlobalConfiguration().clone()); /** * Creates an instance of a DefaultAzureCredentialBuilder. */ public DefaultAzureCredentialBuilder() { this.identityClientOptions.setIdentityLogOptionsImpl(new IdentityLogOptionsImpl(true)); } /** * Sets the tenant id of the user to authenticate through the {@link DefaultAzureCredential}. If unset, the value * in the AZURE_TENANT_ID environment variable will be used. If neither is set, the default is null * and will authenticate users to their default tenant. * * @param tenantId the tenant ID to set. * @return An updated instance of this builder with the tenant id set as specified. */ public DefaultAzureCredentialBuilder tenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public DefaultAzureCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the KeePass database path to read the cached credentials of Azure toolkit for IntelliJ plugin. * The {@code databasePath} is required on Windows platform. For macOS and Linux platform native key chain / * key ring will be accessed respectively to retrieve the cached credentials. * * <p>This path can be located in the IntelliJ IDE. 
* Windows: File -&gt; Settings -&gt; Appearance &amp; Behavior -&gt; System Settings -&gt; Passwords. </p> * * @param databasePath the path to the KeePass database. * @throws IllegalArgumentException if {@code databasePath} is either not specified or is empty. * @return An updated instance of this builder with the KeePass database path set as specified. */ public DefaultAzureCredentialBuilder intelliJKeePassDatabasePath(String databasePath) { if (CoreUtils.isNullOrEmpty(databasePath)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The KeePass database path is either empty or not configured." + " Please configure it on the builder.")); } this.identityClientOptions.setIntelliJKeePassDatabasePath(databasePath); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityClientId and managedIdentityResourceId can be specified. * * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the client ID of Azure AD app to be used for AKS workload identity authentication. * if unset, {@link DefaultAzureCredentialBuilder * If both values are unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If none are set, the default value is null and Workload Identity authentication will not be attempted. 
* * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder workloadIdentityClientId(String clientId) { this.workloadIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityResourceId and managedIdentityClientId can be specified. * * @param resourceId the resource ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; } /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. 
*/ public DefaultAzureCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(String... additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(Arrays.asList(additionallyAllowedTenants)); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(additionallyAllowedTenants); return this; } /** * Creates new {@link DefaultAzureCredential} with the configured options set. * * @return a {@link DefaultAzureCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. 
*/ public DefaultAzureCredential build() { loadFallbackValuesFromEnvironment(); if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityResourceId and managedIdentityClientId can be specified.")); } if (!CoreUtils.isNullOrEmpty(additionallyAllowedTenants)) { identityClientOptions.setAdditionallyAllowedTenants(additionallyAllowedTenants); } return new DefaultAzureCredential(getCredentialsChain()); } private void loadFallbackValuesFromEnvironment() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); tenantId = CoreUtils.isNullOrEmpty(tenantId) ? configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID) : tenantId; managedIdentityClientId = CoreUtils.isNullOrEmpty(managedIdentityClientId) ? configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID) : managedIdentityClientId; } private ArrayList<TokenCredential> getCredentialsChain() { WorkloadIdentityCredential workloadIdentityCredential = getWorkloadIdentityCredentialIfAvailable(); ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(workloadIdentityCredential != null ? 
8 : 7); output.add(new EnvironmentCredential(identityClientOptions.clone())); if (workloadIdentityCredential != null) { output.add(workloadIdentityCredential); } output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions.clone())); output.add(new AzureDeveloperCliCredential(tenantId, identityClientOptions.clone())); output.add(new SharedTokenCacheCredential(null, IdentityConstants.DEVELOPER_SINGLE_SIGN_ON_ID, tenantId, identityClientOptions.clone())); output.add(new IntelliJCredential(tenantId, identityClientOptions.clone())); output.add(new AzureCliCredential(tenantId, identityClientOptions.clone())); output.add(new AzurePowerShellCredential(tenantId, identityClientOptions.clone())); return output; } }
I see. It's maybe a good idea to remove this conditional anyway, unless the method call is expensive, because the helper checks the same conditions.
private ArrayList<TokenCredential> getCredentialsChain() { WorkloadIdentityCredential workloadIdentityCredential = (CoreUtils.isNullOrEmpty(managedIdentityClientId) && CoreUtils.isNullOrEmpty(workloadIdentityClientId)) ? null : getWorkloadIdentityCredentialIfAvailable(); ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(workloadIdentityCredential != null ? 8 : 7); output.add(new EnvironmentCredential(identityClientOptions.clone())); if (workloadIdentityCredential != null) { output.add(workloadIdentityCredential); } output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions.clone())); output.add(new AzureDeveloperCliCredential(tenantId, identityClientOptions.clone())); output.add(new SharedTokenCacheCredential(null, IdentityConstants.DEVELOPER_SINGLE_SIGN_ON_ID, tenantId, identityClientOptions.clone())); output.add(new IntelliJCredential(tenantId, identityClientOptions.clone())); output.add(new AzureCliCredential(tenantId, identityClientOptions.clone())); output.add(new AzurePowerShellCredential(tenantId, identityClientOptions.clone())); return output; }
WorkloadIdentityCredential workloadIdentityCredential = (CoreUtils.isNullOrEmpty(managedIdentityClientId) && CoreUtils.isNullOrEmpty(workloadIdentityClientId)) ? null : getWorkloadIdentityCredentialIfAvailable();
private ArrayList<TokenCredential> getCredentialsChain() { WorkloadIdentityCredential workloadIdentityCredential = getWorkloadIdentityCredentialIfAvailable(); ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(workloadIdentityCredential != null ? 8 : 7); output.add(new EnvironmentCredential(identityClientOptions.clone())); if (workloadIdentityCredential != null) { output.add(workloadIdentityCredential); } output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions.clone())); output.add(new AzureDeveloperCliCredential(tenantId, identityClientOptions.clone())); output.add(new SharedTokenCacheCredential(null, IdentityConstants.DEVELOPER_SINGLE_SIGN_ON_ID, tenantId, identityClientOptions.clone())); output.add(new IntelliJCredential(tenantId, identityClientOptions.clone())); output.add(new AzureCliCredential(tenantId, identityClientOptions.clone())); output.add(new AzurePowerShellCredential(tenantId, identityClientOptions.clone())); return output; }
class DefaultAzureCredentialBuilder extends CredentialBuilderBase<DefaultAzureCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(DefaultAzureCredentialBuilder.class); private String tenantId; private String managedIdentityClientId; private String workloadIdentityClientId; private String managedIdentityResourceId; private List<String> additionallyAllowedTenants = IdentityUtil .getAdditionalTenantsFromEnvironment(Configuration.getGlobalConfiguration().clone()); /** * Creates an instance of a DefaultAzureCredentialBuilder. */ public DefaultAzureCredentialBuilder() { Configuration configuration = Configuration.getGlobalConfiguration().clone(); tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); managedIdentityClientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); this.identityClientOptions.setIdentityLogOptionsImpl(new IdentityLogOptionsImpl(true)); } /** * Sets the tenant id of the user to authenticate through the {@link DefaultAzureCredential}. If unset, the value * in the AZURE_TENANT_ID environment variable will be used. If neither is set, the default is null * and will authenticate users to their default tenant. * * @param tenantId the tenant ID to set. * @return An updated instance of this builder with the tenant id set as specified. */ public DefaultAzureCredentialBuilder tenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public DefaultAzureCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the KeePass database path to read the cached credentials of Azure toolkit for IntelliJ plugin. * The {@code databasePath} is required on Windows platform. 
For macOS and Linux platform native key chain / * key ring will be accessed respectively to retrieve the cached credentials. * * <p>This path can be located in the IntelliJ IDE. * Windows: File -&gt; Settings -&gt; Appearance &amp; Behavior -&gt; System Settings -&gt; Passwords. </p> * * @param databasePath the path to the KeePass database. * @throws IllegalArgumentException if {@code databasePath} is either not specified or is empty. * @return An updated instance of this builder with the KeePass database path set as specified. */ public DefaultAzureCredentialBuilder intelliJKeePassDatabasePath(String databasePath) { if (CoreUtils.isNullOrEmpty(databasePath)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The KeePass database path is either empty or not configured." + " Please configure it on the builder.")); } this.identityClientOptions.setIntelliJKeePassDatabasePath(databasePath); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityClientId and managedIdentityResourceId can be specified. * * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the client ID of Azure AD app to be used for AKS workload identity authentication. * if unset, {@link DefaultAzureCredentialBuilder * If both values are unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If none are set, the default value is null and Workload Identity authentication will not be attempted. 
* * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder workloadIdentityClientId(String clientId) { this.workloadIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityResourceId and managedIdentityClientId can be specified. * * @param resourceId the resource ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; } /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. 
*/ public DefaultAzureCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(String... additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(Arrays.asList(additionallyAllowedTenants)); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(additionallyAllowedTenants); return this; } /** * Creates new {@link DefaultAzureCredential} with the configured options set. * * @return a {@link DefaultAzureCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. 
*/ public DefaultAzureCredential build() { if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityResourceId and managedIdentityClientId can be specified.")); } if (!CoreUtils.isNullOrEmpty(additionallyAllowedTenants)) { identityClientOptions.setAdditionallyAllowedTenants(additionallyAllowedTenants); } return new DefaultAzureCredential(getCredentialsChain()); } private WorkloadIdentityCredential getWorkloadIdentityCredentialIfAvailable() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE); String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); if (!(CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || (CoreUtils.isNullOrEmpty(managedIdentityClientId) && CoreUtils.isNullOrEmpty(workloadIdentityClientId)) || CoreUtils.isNullOrEmpty(azureAuthorityHost))) { return new WorkloadIdentityCredential(managedIdentityClientId, tenantId, federatedTokenFilePath, identityClientOptions.setAuthorityHost(azureAuthorityHost).clone()); } return null; } }
class DefaultAzureCredentialBuilder extends CredentialBuilderBase<DefaultAzureCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(DefaultAzureCredentialBuilder.class); private String tenantId; private String managedIdentityClientId; private String workloadIdentityClientId; private String managedIdentityResourceId; private List<String> additionallyAllowedTenants = IdentityUtil .getAdditionalTenantsFromEnvironment(Configuration.getGlobalConfiguration().clone()); /** * Creates an instance of a DefaultAzureCredentialBuilder. */ public DefaultAzureCredentialBuilder() { this.identityClientOptions.setIdentityLogOptionsImpl(new IdentityLogOptionsImpl(true)); } /** * Sets the tenant id of the user to authenticate through the {@link DefaultAzureCredential}. If unset, the value * in the AZURE_TENANT_ID environment variable will be used. If neither is set, the default is null * and will authenticate users to their default tenant. * * @param tenantId the tenant ID to set. * @return An updated instance of this builder with the tenant id set as specified. */ public DefaultAzureCredentialBuilder tenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public DefaultAzureCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the KeePass database path to read the cached credentials of Azure toolkit for IntelliJ plugin. * The {@code databasePath} is required on Windows platform. For macOS and Linux platform native key chain / * key ring will be accessed respectively to retrieve the cached credentials. * * <p>This path can be located in the IntelliJ IDE. 
* Windows: File -&gt; Settings -&gt; Appearance &amp; Behavior -&gt; System Settings -&gt; Passwords. </p> * * @param databasePath the path to the KeePass database. * @throws IllegalArgumentException if {@code databasePath} is either not specified or is empty. * @return An updated instance of this builder with the KeePass database path set as specified. */ public DefaultAzureCredentialBuilder intelliJKeePassDatabasePath(String databasePath) { if (CoreUtils.isNullOrEmpty(databasePath)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The KeePass database path is either empty or not configured." + " Please configure it on the builder.")); } this.identityClientOptions.setIntelliJKeePassDatabasePath(databasePath); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityClientId and managedIdentityResourceId can be specified. * * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the client ID of Azure AD app to be used for AKS workload identity authentication. * if unset, {@link DefaultAzureCredentialBuilder * If both values are unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If none are set, the default value is null and Workload Identity authentication will not be attempted. 
* * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder workloadIdentityClientId(String clientId) { this.workloadIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityResourceId and managedIdentityClientId can be specified. * * @param resourceId the resource ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; } /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. 
*/ public DefaultAzureCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(String... additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(Arrays.asList(additionallyAllowedTenants)); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(additionallyAllowedTenants); return this; } /** * Creates new {@link DefaultAzureCredential} with the configured options set. * * @return a {@link DefaultAzureCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. 
*/ public DefaultAzureCredential build() { loadFallbackValuesFromEnvironment(); if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityResourceId and managedIdentityClientId can be specified.")); } if (!CoreUtils.isNullOrEmpty(additionallyAllowedTenants)) { identityClientOptions.setAdditionallyAllowedTenants(additionallyAllowedTenants); } return new DefaultAzureCredential(getCredentialsChain()); } private void loadFallbackValuesFromEnvironment() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); tenantId = CoreUtils.isNullOrEmpty(tenantId) ? configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID) : tenantId; managedIdentityClientId = CoreUtils.isNullOrEmpty(managedIdentityClientId) ? configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID) : managedIdentityClientId; } private WorkloadIdentityCredential getWorkloadIdentityCredentialIfAvailable() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE); String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); String clientId = CoreUtils.isNullOrEmpty(workloadIdentityClientId) ? managedIdentityClientId : workloadIdentityClientId; if (!(CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || CoreUtils.isNullOrEmpty(clientId) || CoreUtils.isNullOrEmpty(azureAuthorityHost))) { return new WorkloadIdentityCredential(tenantId, clientId, federatedTokenFilePath, identityClientOptions.setAuthorityHost(azureAuthorityHost).clone()); } return null; } }
added more tests to validate this.
private WorkloadIdentityCredential getWorkloadIdentityCredentialIfAvailable() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE); String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); String clientId = CoreUtils.isNullOrEmpty(workloadIdentityClientId) ? managedIdentityClientId : workloadIdentityClientId; if (!(CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || CoreUtils.isNullOrEmpty(clientId) || CoreUtils.isNullOrEmpty(azureAuthorityHost))) { return new WorkloadIdentityCredential(clientId, tenantId, federatedTokenFilePath, identityClientOptions.setAuthorityHost(azureAuthorityHost).clone()); } return null; }
return new WorkloadIdentityCredential(clientId, tenantId, federatedTokenFilePath, identityClientOptions.setAuthorityHost(azureAuthorityHost).clone());
private WorkloadIdentityCredential getWorkloadIdentityCredentialIfAvailable() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE); String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); String clientId = CoreUtils.isNullOrEmpty(workloadIdentityClientId) ? managedIdentityClientId : workloadIdentityClientId; if (!(CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || CoreUtils.isNullOrEmpty(clientId) || CoreUtils.isNullOrEmpty(azureAuthorityHost))) { return new WorkloadIdentityCredential(tenantId, clientId, federatedTokenFilePath, identityClientOptions.setAuthorityHost(azureAuthorityHost).clone()); } return null; }
class DefaultAzureCredentialBuilder extends CredentialBuilderBase<DefaultAzureCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(DefaultAzureCredentialBuilder.class); private String tenantId; private String managedIdentityClientId; private String workloadIdentityClientId; private String managedIdentityResourceId; private List<String> additionallyAllowedTenants = IdentityUtil .getAdditionalTenantsFromEnvironment(Configuration.getGlobalConfiguration().clone()); /** * Creates an instance of a DefaultAzureCredentialBuilder. */ public DefaultAzureCredentialBuilder() { Configuration configuration = Configuration.getGlobalConfiguration().clone(); tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); managedIdentityClientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); this.identityClientOptions.setIdentityLogOptionsImpl(new IdentityLogOptionsImpl(true)); } /** * Sets the tenant id of the user to authenticate through the {@link DefaultAzureCredential}. If unset, the value * in the AZURE_TENANT_ID environment variable will be used. If neither is set, the default is null * and will authenticate users to their default tenant. * * @param tenantId the tenant ID to set. * @return An updated instance of this builder with the tenant id set as specified. */ public DefaultAzureCredentialBuilder tenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public DefaultAzureCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the KeePass database path to read the cached credentials of Azure toolkit for IntelliJ plugin. * The {@code databasePath} is required on Windows platform. 
For macOS and Linux platform native key chain / * key ring will be accessed respectively to retrieve the cached credentials. * * <p>This path can be located in the IntelliJ IDE. * Windows: File -&gt; Settings -&gt; Appearance &amp; Behavior -&gt; System Settings -&gt; Passwords. </p> * * @param databasePath the path to the KeePass database. * @throws IllegalArgumentException if {@code databasePath} is either not specified or is empty. * @return An updated instance of this builder with the KeePass database path set as specified. */ public DefaultAzureCredentialBuilder intelliJKeePassDatabasePath(String databasePath) { if (CoreUtils.isNullOrEmpty(databasePath)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The KeePass database path is either empty or not configured." + " Please configure it on the builder.")); } this.identityClientOptions.setIntelliJKeePassDatabasePath(databasePath); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityClientId and managedIdentityResourceId can be specified. * * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the client ID of Azure AD app to be used for AKS workload identity authentication. * if unset, {@link DefaultAzureCredentialBuilder * If both values are unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If none are set, the default value is null and Workload Identity authentication will not be attempted. 
* * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder workloadIdentityClientId(String clientId) { this.workloadIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityResourceId and managedIdentityClientId can be specified. * * @param resourceId the resource ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; } /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. 
*/ public DefaultAzureCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(String... additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(Arrays.asList(additionallyAllowedTenants)); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(additionallyAllowedTenants); return this; } /** * Creates new {@link DefaultAzureCredential} with the configured options set. * * @return a {@link DefaultAzureCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. 
*/ public DefaultAzureCredential build() { if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityResourceId and managedIdentityClientId can be specified.")); } if (!CoreUtils.isNullOrEmpty(additionallyAllowedTenants)) { identityClientOptions.setAdditionallyAllowedTenants(additionallyAllowedTenants); } return new DefaultAzureCredential(getCredentialsChain()); } private ArrayList<TokenCredential> getCredentialsChain() { WorkloadIdentityCredential workloadIdentityCredential = (CoreUtils.isNullOrEmpty(managedIdentityClientId) && CoreUtils.isNullOrEmpty(workloadIdentityClientId)) ? null : getWorkloadIdentityCredentialIfAvailable(); ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(workloadIdentityCredential != null ? 8 : 7); output.add(new EnvironmentCredential(identityClientOptions.clone())); if (workloadIdentityCredential != null) { output.add(workloadIdentityCredential); } output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions.clone())); output.add(new AzureDeveloperCliCredential(tenantId, identityClientOptions.clone())); output.add(new SharedTokenCacheCredential(null, IdentityConstants.DEVELOPER_SINGLE_SIGN_ON_ID, tenantId, identityClientOptions.clone())); output.add(new IntelliJCredential(tenantId, identityClientOptions.clone())); output.add(new AzureCliCredential(tenantId, identityClientOptions.clone())); output.add(new AzurePowerShellCredential(tenantId, identityClientOptions.clone())); return output; } }
class DefaultAzureCredentialBuilder extends CredentialBuilderBase<DefaultAzureCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(DefaultAzureCredentialBuilder.class); private String tenantId; private String managedIdentityClientId; private String workloadIdentityClientId; private String managedIdentityResourceId; private List<String> additionallyAllowedTenants = IdentityUtil .getAdditionalTenantsFromEnvironment(Configuration.getGlobalConfiguration().clone()); /** * Creates an instance of a DefaultAzureCredentialBuilder. */ public DefaultAzureCredentialBuilder() { this.identityClientOptions.setIdentityLogOptionsImpl(new IdentityLogOptionsImpl(true)); } /** * Sets the tenant id of the user to authenticate through the {@link DefaultAzureCredential}. If unset, the value * in the AZURE_TENANT_ID environment variable will be used. If neither is set, the default is null * and will authenticate users to their default tenant. * * @param tenantId the tenant ID to set. * @return An updated instance of this builder with the tenant id set as specified. */ public DefaultAzureCredentialBuilder tenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public DefaultAzureCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the KeePass database path to read the cached credentials of Azure toolkit for IntelliJ plugin. * The {@code databasePath} is required on Windows platform. For macOS and Linux platform native key chain / * key ring will be accessed respectively to retrieve the cached credentials. * * <p>This path can be located in the IntelliJ IDE. 
* Windows: File -&gt; Settings -&gt; Appearance &amp; Behavior -&gt; System Settings -&gt; Passwords. </p> * * @param databasePath the path to the KeePass database. * @throws IllegalArgumentException if {@code databasePath} is either not specified or is empty. * @return An updated instance of this builder with the KeePass database path set as specified. */ public DefaultAzureCredentialBuilder intelliJKeePassDatabasePath(String databasePath) { if (CoreUtils.isNullOrEmpty(databasePath)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The KeePass database path is either empty or not configured." + " Please configure it on the builder.")); } this.identityClientOptions.setIntelliJKeePassDatabasePath(databasePath); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityClientId and managedIdentityResourceId can be specified. * * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the client ID of Azure AD app to be used for AKS workload identity authentication. * if unset, {@link DefaultAzureCredentialBuilder * If both values are unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If none are set, the default value is null and Workload Identity authentication will not be attempted. 
* * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder workloadIdentityClientId(String clientId) { this.workloadIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityResourceId and managedIdentityClientId can be specified. * * @param resourceId the resource ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; } /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. 
*/ public DefaultAzureCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(String... additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(Arrays.asList(additionallyAllowedTenants)); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(additionallyAllowedTenants); return this; } /** * Creates new {@link DefaultAzureCredential} with the configured options set. * * @return a {@link DefaultAzureCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. 
*/ public DefaultAzureCredential build() { loadFallbackValuesFromEnvironment(); if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityResourceId and managedIdentityClientId can be specified.")); } if (!CoreUtils.isNullOrEmpty(additionallyAllowedTenants)) { identityClientOptions.setAdditionallyAllowedTenants(additionallyAllowedTenants); } return new DefaultAzureCredential(getCredentialsChain()); } private void loadFallbackValuesFromEnvironment() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); tenantId = CoreUtils.isNullOrEmpty(tenantId) ? configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID) : tenantId; managedIdentityClientId = CoreUtils.isNullOrEmpty(managedIdentityClientId) ? configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID) : managedIdentityClientId; } private ArrayList<TokenCredential> getCredentialsChain() { WorkloadIdentityCredential workloadIdentityCredential = getWorkloadIdentityCredentialIfAvailable(); ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(workloadIdentityCredential != null ? 
8 : 7); output.add(new EnvironmentCredential(identityClientOptions.clone())); if (workloadIdentityCredential != null) { output.add(workloadIdentityCredential); } output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions.clone())); output.add(new AzureDeveloperCliCredential(tenantId, identityClientOptions.clone())); output.add(new SharedTokenCacheCredential(null, IdentityConstants.DEVELOPER_SINGLE_SIGN_ON_ID, tenantId, identityClientOptions.clone())); output.add(new IntelliJCredential(tenantId, identityClientOptions.clone())); output.add(new AzureCliCredential(tenantId, identityClientOptions.clone())); output.add(new AzurePowerShellCredential(tenantId, identityClientOptions.clone())); return output; } }
removed this conditional check.
private ArrayList<TokenCredential> getCredentialsChain() { WorkloadIdentityCredential workloadIdentityCredential = (CoreUtils.isNullOrEmpty(managedIdentityClientId) && CoreUtils.isNullOrEmpty(workloadIdentityClientId)) ? null : getWorkloadIdentityCredentialIfAvailable(); ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(workloadIdentityCredential != null ? 8 : 7); output.add(new EnvironmentCredential(identityClientOptions.clone())); if (workloadIdentityCredential != null) { output.add(workloadIdentityCredential); } output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions.clone())); output.add(new AzureDeveloperCliCredential(tenantId, identityClientOptions.clone())); output.add(new SharedTokenCacheCredential(null, IdentityConstants.DEVELOPER_SINGLE_SIGN_ON_ID, tenantId, identityClientOptions.clone())); output.add(new IntelliJCredential(tenantId, identityClientOptions.clone())); output.add(new AzureCliCredential(tenantId, identityClientOptions.clone())); output.add(new AzurePowerShellCredential(tenantId, identityClientOptions.clone())); return output; }
WorkloadIdentityCredential workloadIdentityCredential = (CoreUtils.isNullOrEmpty(managedIdentityClientId) && CoreUtils.isNullOrEmpty(workloadIdentityClientId)) ? null : getWorkloadIdentityCredentialIfAvailable();
private ArrayList<TokenCredential> getCredentialsChain() { WorkloadIdentityCredential workloadIdentityCredential = getWorkloadIdentityCredentialIfAvailable(); ArrayList<TokenCredential> output = new ArrayList<TokenCredential>(workloadIdentityCredential != null ? 8 : 7); output.add(new EnvironmentCredential(identityClientOptions.clone())); if (workloadIdentityCredential != null) { output.add(workloadIdentityCredential); } output.add(new ManagedIdentityCredential(managedIdentityClientId, managedIdentityResourceId, identityClientOptions.clone())); output.add(new AzureDeveloperCliCredential(tenantId, identityClientOptions.clone())); output.add(new SharedTokenCacheCredential(null, IdentityConstants.DEVELOPER_SINGLE_SIGN_ON_ID, tenantId, identityClientOptions.clone())); output.add(new IntelliJCredential(tenantId, identityClientOptions.clone())); output.add(new AzureCliCredential(tenantId, identityClientOptions.clone())); output.add(new AzurePowerShellCredential(tenantId, identityClientOptions.clone())); return output; }
class DefaultAzureCredentialBuilder extends CredentialBuilderBase<DefaultAzureCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(DefaultAzureCredentialBuilder.class); private String tenantId; private String managedIdentityClientId; private String workloadIdentityClientId; private String managedIdentityResourceId; private List<String> additionallyAllowedTenants = IdentityUtil .getAdditionalTenantsFromEnvironment(Configuration.getGlobalConfiguration().clone()); /** * Creates an instance of a DefaultAzureCredentialBuilder. */ public DefaultAzureCredentialBuilder() { Configuration configuration = Configuration.getGlobalConfiguration().clone(); tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); managedIdentityClientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); this.identityClientOptions.setIdentityLogOptionsImpl(new IdentityLogOptionsImpl(true)); } /** * Sets the tenant id of the user to authenticate through the {@link DefaultAzureCredential}. If unset, the value * in the AZURE_TENANT_ID environment variable will be used. If neither is set, the default is null * and will authenticate users to their default tenant. * * @param tenantId the tenant ID to set. * @return An updated instance of this builder with the tenant id set as specified. */ public DefaultAzureCredentialBuilder tenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public DefaultAzureCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the KeePass database path to read the cached credentials of Azure toolkit for IntelliJ plugin. * The {@code databasePath} is required on Windows platform. 
For macOS and Linux platform native key chain / * key ring will be accessed respectively to retrieve the cached credentials. * * <p>This path can be located in the IntelliJ IDE. * Windows: File -&gt; Settings -&gt; Appearance &amp; Behavior -&gt; System Settings -&gt; Passwords. </p> * * @param databasePath the path to the KeePass database. * @throws IllegalArgumentException if {@code databasePath} is either not specified or is empty. * @return An updated instance of this builder with the KeePass database path set as specified. */ public DefaultAzureCredentialBuilder intelliJKeePassDatabasePath(String databasePath) { if (CoreUtils.isNullOrEmpty(databasePath)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The KeePass database path is either empty or not configured." + " Please configure it on the builder.")); } this.identityClientOptions.setIntelliJKeePassDatabasePath(databasePath); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityClientId and managedIdentityResourceId can be specified. * * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the client ID of Azure AD app to be used for AKS workload identity authentication. * if unset, {@link DefaultAzureCredentialBuilder * If both values are unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If none are set, the default value is null and Workload Identity authentication will not be attempted. 
* * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder workloadIdentityClientId(String clientId) { this.workloadIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityResourceId and managedIdentityClientId can be specified. * * @param resourceId the resource ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; } /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. 
*/ public DefaultAzureCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(String... additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(Arrays.asList(additionallyAllowedTenants)); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(additionallyAllowedTenants); return this; } /** * Creates new {@link DefaultAzureCredential} with the configured options set. * * @return a {@link DefaultAzureCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. 
*/ public DefaultAzureCredential build() { if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityResourceId and managedIdentityClientId can be specified.")); } if (!CoreUtils.isNullOrEmpty(additionallyAllowedTenants)) { identityClientOptions.setAdditionallyAllowedTenants(additionallyAllowedTenants); } return new DefaultAzureCredential(getCredentialsChain()); } private WorkloadIdentityCredential getWorkloadIdentityCredentialIfAvailable() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE); String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); if (!(CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || (CoreUtils.isNullOrEmpty(managedIdentityClientId) && CoreUtils.isNullOrEmpty(workloadIdentityClientId)) || CoreUtils.isNullOrEmpty(azureAuthorityHost))) { return new WorkloadIdentityCredential(managedIdentityClientId, tenantId, federatedTokenFilePath, identityClientOptions.setAuthorityHost(azureAuthorityHost).clone()); } return null; } }
class DefaultAzureCredentialBuilder extends CredentialBuilderBase<DefaultAzureCredentialBuilder> { private static final ClientLogger LOGGER = new ClientLogger(DefaultAzureCredentialBuilder.class); private String tenantId; private String managedIdentityClientId; private String workloadIdentityClientId; private String managedIdentityResourceId; private List<String> additionallyAllowedTenants = IdentityUtil .getAdditionalTenantsFromEnvironment(Configuration.getGlobalConfiguration().clone()); /** * Creates an instance of a DefaultAzureCredentialBuilder. */ public DefaultAzureCredentialBuilder() { this.identityClientOptions.setIdentityLogOptionsImpl(new IdentityLogOptionsImpl(true)); } /** * Sets the tenant id of the user to authenticate through the {@link DefaultAzureCredential}. If unset, the value * in the AZURE_TENANT_ID environment variable will be used. If neither is set, the default is null * and will authenticate users to their default tenant. * * @param tenantId the tenant ID to set. * @return An updated instance of this builder with the tenant id set as specified. */ public DefaultAzureCredentialBuilder tenantId(String tenantId) { this.tenantId = tenantId; return this; } /** * Specifies the Azure Active Directory endpoint to acquire tokens. * @param authorityHost the Azure Active Directory endpoint * @return An updated instance of this builder with the authority host set as specified. */ public DefaultAzureCredentialBuilder authorityHost(String authorityHost) { this.identityClientOptions.setAuthorityHost(authorityHost); return this; } /** * Specifies the KeePass database path to read the cached credentials of Azure toolkit for IntelliJ plugin. * The {@code databasePath} is required on Windows platform. For macOS and Linux platform native key chain / * key ring will be accessed respectively to retrieve the cached credentials. * * <p>This path can be located in the IntelliJ IDE. 
* Windows: File -&gt; Settings -&gt; Appearance &amp; Behavior -&gt; System Settings -&gt; Passwords. </p> * * @param databasePath the path to the KeePass database. * @throws IllegalArgumentException if {@code databasePath} is either not specified or is empty. * @return An updated instance of this builder with the KeePass database path set as specified. */ public DefaultAzureCredentialBuilder intelliJKeePassDatabasePath(String databasePath) { if (CoreUtils.isNullOrEmpty(databasePath)) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The KeePass database path is either empty or not configured." + " Please configure it on the builder.")); } this.identityClientOptions.setIntelliJKeePassDatabasePath(databasePath); return this; } /** * Specifies the client ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityClientId and managedIdentityResourceId can be specified. * * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityClientId(String clientId) { this.managedIdentityClientId = clientId; return this; } /** * Specifies the client ID of Azure AD app to be used for AKS workload identity authentication. * if unset, {@link DefaultAzureCredentialBuilder * If both values are unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If none are set, the default value is null and Workload Identity authentication will not be attempted. 
* * @param clientId the client ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder workloadIdentityClientId(String clientId) { this.workloadIdentityClientId = clientId; return this; } /** * Specifies the resource ID of user assigned or system assigned identity, when this credential is running * in an environment with managed identities. If unset, the value in the AZURE_CLIENT_ID environment variable * will be used. If neither is set, the default value is null and will only work with system assigned * managed identities and not user assigned managed identities. * * Only one of managedIdentityResourceId and managedIdentityClientId can be specified. * * @param resourceId the resource ID * @return the DefaultAzureCredentialBuilder itself */ public DefaultAzureCredentialBuilder managedIdentityResourceId(String resourceId) { this.managedIdentityResourceId = resourceId; return this; } /** * Specifies the ExecutorService to be used to execute the authentication requests. * Developer is responsible for maintaining the lifecycle of the ExecutorService. * * <p> * If this is not configured, the {@link ForkJoinPool * also shared with other application tasks. If the common pool is heavily used for other tasks, authentication * requests might starve and setting up this executor service should be considered. * </p> * * <p> The executor service and can be safely shutdown if the TokenCredential is no longer being used by the * Azure SDK clients and should be shutdown before the application exits. </p> * * @param executorService the executor service to use for executing authentication requests. * @return An updated instance of this builder with the executor service set as specified. 
*/ public DefaultAzureCredentialBuilder executorService(ExecutorService executorService) { this.identityClientOptions.setExecutorService(executorService); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(String... additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(Arrays.asList(additionallyAllowedTenants)); return this; } /** * For multi-tenant applications, specifies additional tenants for which the credential may acquire tokens. * Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the application is installed. * * @param additionallyAllowedTenants the additionally allowed tenants. * @return An updated instance of this builder with the tenant id set as specified. */ @SuppressWarnings("unchecked") public DefaultAzureCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) { this.additionallyAllowedTenants = IdentityUtil.resolveAdditionalTenants(additionallyAllowedTenants); return this; } /** * Creates new {@link DefaultAzureCredential} with the configured options set. * * @return a {@link DefaultAzureCredential} with the current configurations. * @throws IllegalStateException if clientId and resourceId are both set. 
*/ public DefaultAzureCredential build() { loadFallbackValuesFromEnvironment(); if (managedIdentityClientId != null && managedIdentityResourceId != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Only one of managedIdentityResourceId and managedIdentityClientId can be specified.")); } if (!CoreUtils.isNullOrEmpty(additionallyAllowedTenants)) { identityClientOptions.setAdditionallyAllowedTenants(additionallyAllowedTenants); } return new DefaultAzureCredential(getCredentialsChain()); } private void loadFallbackValuesFromEnvironment() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); tenantId = CoreUtils.isNullOrEmpty(tenantId) ? configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID) : tenantId; managedIdentityClientId = CoreUtils.isNullOrEmpty(managedIdentityClientId) ? configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID) : managedIdentityClientId; } private WorkloadIdentityCredential getWorkloadIdentityCredentialIfAvailable() { Configuration configuration = identityClientOptions.getConfiguration() == null ? Configuration.getGlobalConfiguration().clone() : identityClientOptions.getConfiguration(); String tenantId = configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID); String federatedTokenFilePath = configuration.get(AZURE_FEDERATED_TOKEN_FILE); String azureAuthorityHost = configuration.get(Configuration.PROPERTY_AZURE_AUTHORITY_HOST); String clientId = CoreUtils.isNullOrEmpty(workloadIdentityClientId) ? managedIdentityClientId : workloadIdentityClientId; if (!(CoreUtils.isNullOrEmpty(tenantId) || CoreUtils.isNullOrEmpty(federatedTokenFilePath) || CoreUtils.isNullOrEmpty(clientId) || CoreUtils.isNullOrEmpty(azureAuthorityHost))) { return new WorkloadIdentityCredential(tenantId, clientId, federatedTokenFilePath, identityClientOptions.setAuthorityHost(azureAuthorityHost).clone()); } return null; } }
is `flux.create` guarantees this callback will run sequentially even when concurrency is > 1? Otherwise, iterator looks dangerous here
private void createHttpRequest(ObjectSerializer serializer, Iterator<Object> iterator, FluxSink<LogsIngestionRequest> emitter) { try { long currentBatchSize = 0; ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream); generator.writeStartArray(); List<String> serializedLogs = new ArrayList<>(); List<Object> originalLogsRequest = new ArrayList<>(); while (iterator.hasNext()) { Object currentLog = iterator.next(); byte[] bytes = serializer.serializeToBytes(currentLog); int currentLogSize = bytes.length; currentBatchSize += currentLogSize; if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) { writeLogsAndCloseJsonGenerator(generator, serializedLogs); byte[] zippedRequestBody = gzipRequest(byteArrayOutputStream.toByteArray()); emitter.next(new LogsIngestionRequest(originalLogsRequest, zippedRequestBody)); byteArrayOutputStream = new ByteArrayOutputStream(); generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream); generator.writeStartArray(); currentBatchSize = currentLogSize; originalLogsRequest = new ArrayList<>(); serializedLogs.clear(); } serializedLogs.add(new String(bytes, StandardCharsets.UTF_8)); originalLogsRequest.add(currentLog); } if (currentBatchSize > 0) { writeLogsAndCloseJsonGenerator(generator, serializedLogs); byte[] zippedRequestBody = gzipRequest(byteArrayOutputStream.toByteArray()); emitter.next(new LogsIngestionRequest(originalLogsRequest, zippedRequestBody)); } emitter.complete(); } catch (IOException e) { emitter.error(e); } }
while (iterator.hasNext()) {
private void createHttpRequest(ObjectSerializer serializer, Iterator<Object> iterator, FluxSink<LogsIngestionRequest> emitter) { try { long currentBatchSize = 0; ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream); generator.writeStartArray(); List<String> serializedLogs = new ArrayList<>(); List<Object> originalLogsRequest = new ArrayList<>(); while (iterator.hasNext()) { Object currentLog = iterator.next(); byte[] bytes = serializer.serializeToBytes(currentLog); int currentLogSize = bytes.length; currentBatchSize += currentLogSize; if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) { writeLogsAndCloseJsonGenerator(generator, serializedLogs); byte[] zippedRequestBody = gzipRequest(byteArrayOutputStream.toByteArray()); emitter.next(new LogsIngestionRequest(originalLogsRequest, zippedRequestBody)); byteArrayOutputStream = new ByteArrayOutputStream(); generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream); generator.writeStartArray(); currentBatchSize = currentLogSize; originalLogsRequest = new ArrayList<>(); serializedLogs.clear(); } serializedLogs.add(new String(bytes, StandardCharsets.UTF_8)); originalLogsRequest.add(currentLog); } if (currentBatchSize > 0) { writeLogsAndCloseJsonGenerator(generator, serializedLogs); byte[] zippedRequestBody = gzipRequest(byteArrayOutputStream.toByteArray()); emitter.next(new LogsIngestionRequest(originalLogsRequest, zippedRequestBody)); } emitter.complete(); } catch (IOException e) { emitter.error(e); } }
class LogsIngestionAsyncClient { private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class); private static final String CONTENT_ENCODING = "Content-Encoding"; private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024; private static final String GZIP = "gzip"; private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true); private final IngestionUsingDataCollectionRulesAsyncClient service; LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) { this.service = service; } /** * Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be * too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split * the input logs into multiple smaller requests before sending to the service. * * <p><strong>Upload logs to Azure Monitor</strong></p> * <!-- src_embed com.azure.monitor.ingestion.LogsIngestionAsyncClient.upload --> * <pre> * List&lt;Object&gt; logs = getLogs& * logsIngestionAsyncClient.upload& * .subscribe& * </pre> * <!-- end com.azure.monitor.ingestion.LogsIngestionAsyncClient.upload --> * * @param ruleId the data collection rule id that is configured to collect and transform the logs. * @param streamName the stream name configured in data collection rule that matches defines the structure of the * logs sent in this request. * @param logs the collection of logs to be uploaded. * @return the {@link Mono} that completes on completion of the upload request. * @throws NullPointerException if any of {@code ruleId}, {@code streamName} or {@code logs} are null. * @throws IllegalArgumentException if {@code logs} is empty. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> upload(String ruleId, String streamName, Iterable<Object> logs) { return upload(ruleId, streamName, logs, new UploadLogsOptions()); } /** * Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be * too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split * the input logs into multiple smaller requests before sending to the service. * * <p><strong>Upload logs to Azure Monitor</strong></p> * <!-- src_embed com.azure.monitor.ingestion.LogsIngestionAsyncClient.uploadWithConcurrency --> * <pre> * List&lt;Object&gt; logs = getLogs& * UploadLogsOptions uploadLogsOptions = new UploadLogsOptions& * logsIngestionAsyncClient.upload& * .subscribe& * </pre> * <!-- end com.azure.monitor.ingestion.LogsIngestionAsyncClient.uploadWithConcurrency --> * * @param ruleId the data collection rule id that is configured to collect and transform the logs. * @param streamName the stream name configured in data collection rule that matches defines the structure of the * logs sent in this request. * @param logs the collection of logs to be uploaded. * @param options the options to configure the upload request. * @return the {@link Mono} that completes on completion of the upload request. * @throws NullPointerException if any of {@code ruleId}, {@code streamName} or {@code logs} are null. * @throws IllegalArgumentException if {@code logs} is empty. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> upload(String ruleId, String streamName, Iterable<Object> logs, UploadLogsOptions options) { return withContext(context -> upload(ruleId, streamName, logs, options, context)); } /** * See error response code and error response message for more detail. 
* * <p><strong>Header Parameters</strong> * * <table border="1"> * <caption>Header Parameters</caption> * <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr> * <tr><td>Content-Encoding</td><td>String</td><td>No</td><td>gzip</td></tr> * <tr><td>x-ms-client-request-id</td><td>String</td><td>No</td><td>Client request Id</td></tr> * </table> * * <p><strong>Request Body Schema</strong> * * <pre>{@code * [ * Object * ] * }</pre> * * @param ruleId The immutable Id of the Data Collection Rule resource. * @param streamName The streamDeclaration name as defined in the Data Collection Rule. * @param logs An array of objects matching the schema defined by the provided stream. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @return the {@link Response} on successful completion of {@link Mono}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> uploadWithResponse( String ruleId, String streamName, BinaryData logs, RequestOptions requestOptions) { Objects.requireNonNull(ruleId, "'ruleId' cannot be null."); Objects.requireNonNull(streamName, "'streamName' cannot be null."); Objects.requireNonNull(logs, "'logs' cannot be null."); if (requestOptions == null) { requestOptions = new RequestOptions(); } requestOptions.addRequestCallback(request -> { HttpHeader httpHeader = request.getHeaders().get(CONTENT_ENCODING); if (httpHeader == null) { BinaryData gzippedRequest = BinaryData.fromBytes(gzipRequest(logs.toBytes())); request.setBody(gzippedRequest); request.setHeader(CONTENT_ENCODING, GZIP); } }); return service.uploadWithResponse(ruleId, streamName, logs, requestOptions); } Mono<Void> upload(String ruleId, String streamName, Iterable<Object> logs, UploadLogsOptions options, Context context) { return Mono.defer(() -> splitAndUpload(ruleId, streamName, logs, options, context)); } /** * This method splits the input logs into < 1MB HTTP requests and uploads to the Azure Monitor service. * @param ruleId The data collection rule id. * @param streamName The stream name configured in the data collection rule. * @param logs The input logs to upload. * @param options The options to configure the upload request. * @param context additional context that is passed through the Http pipeline during the service call. If no * additional context is required, pass {@link Context * @return the {@link Mono} that completes on completion of the upload request. 
*/ private Mono<Void> splitAndUpload(String ruleId, String streamName, Iterable<Object> logs, UploadLogsOptions options, Context context) { int concurrency = 1; ObjectSerializer objectSerializer = DEFAULT_SERIALIZER; if (options != null) { if (options.getObjectSerializer() != null) { objectSerializer = options.getObjectSerializer(); } if (options.getMaxConcurrency() != null) { concurrency = options.getMaxConcurrency(); } } ObjectSerializer serializer = objectSerializer; Iterator<Object> iterator = logs.iterator(); return Flux.<LogsIngestionRequest>create(emitter -> createHttpRequest(serializer, iterator, emitter)) .flatMapSequential(request -> uploadToService(ruleId, streamName, context, request), concurrency) .<UploadLogsException>handle((responseHolder, sink) -> processResponse(options, responseHolder, sink)) .collectList() .handle((result, sink) -> processExceptions(result, sink)); } /** * This method converts raw {@link Object} logs into serialized and gzipped byte array that forms the request * body of the service call. * @param serializer The serializer used to serialize logs. * @param iterator The input logs interator. * @param emitter The Flux emitter to which the HTTP request is emitted. 
*/ private void processExceptions(List<UploadLogsException> result, SynchronousSink<Void> sink) { long failedLogsCount = 0L; List<HttpResponseException> exceptions = new ArrayList<>(); for (UploadLogsException exception : result) { exceptions.addAll(exception.getUploadLogsErrors()); failedLogsCount += exception.getFailedLogsCount(); } if (!exceptions.isEmpty()) { sink.error(new UploadLogsException(exceptions, failedLogsCount)); } else { sink.complete(); } } private void processResponse(UploadLogsOptions options, UploadLogsResponseHolder responseHolder, SynchronousSink<UploadLogsException> sink) { if (responseHolder.getException() != null) { Consumer<UploadLogsError> uploadLogsErrorConsumer = null; if (options != null) { uploadLogsErrorConsumer = options.getUploadLogsErrorConsumer(); } if (uploadLogsErrorConsumer != null) { uploadLogsErrorConsumer.accept(new UploadLogsError(responseHolder.getException(), responseHolder.getRequest().getLogs())); return; } sink.next(new UploadLogsException(Collections.singletonList(responseHolder.getException()), responseHolder.getRequest().getLogs().size())); } } private Mono<UploadLogsResponseHolder> uploadToService(String ruleId, String streamName, Context context, LogsIngestionRequest request) { RequestOptions requestOptions = new RequestOptions() .addHeader(CONTENT_ENCODING, GZIP) .setContext(context); return service.uploadWithResponse(ruleId, streamName, BinaryData.fromBytes(request.getRequestBody()), requestOptions) .map(response -> new UploadLogsResponseHolder(null, null)) .onErrorResume(HttpResponseException.class, ex -> Mono.fromSupplier(() -> new UploadLogsResponseHolder(request, ex))); } private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException { generator.writeRaw(serializedLogs.stream() .collect(Collectors.joining(","))); generator.writeEndArray(); generator.close(); } /** * Gzips the input byte array. * @param bytes The input byte array. 
* @return gzipped byte array. */ private byte[] gzipRequest(byte[] bytes) { ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); try (GZIPOutputStream zip = new GZIPOutputStream(byteArrayOutputStream)) { zip.write(bytes); } catch (IOException exception) { throw LOGGER.logExceptionAsError(new UncheckedIOException(exception)); } return byteArrayOutputStream.toByteArray(); } }
class LogsIngestionAsyncClient { private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class); private static final String CONTENT_ENCODING = "Content-Encoding"; private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024; private static final String GZIP = "gzip"; private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true); private final IngestionUsingDataCollectionRulesAsyncClient service; LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) { this.service = service; } /** * Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be * too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split * the input logs into multiple smaller requests before sending to the service. * * <p><strong>Upload logs to Azure Monitor</strong></p> * <!-- src_embed com.azure.monitor.ingestion.LogsIngestionAsyncClient.upload --> * <pre> * List&lt;Object&gt; logs = getLogs& * logsIngestionAsyncClient.upload& * .subscribe& * </pre> * <!-- end com.azure.monitor.ingestion.LogsIngestionAsyncClient.upload --> * * @param ruleId the data collection rule id that is configured to collect and transform the logs. * @param streamName the stream name configured in data collection rule that matches defines the structure of the * logs sent in this request. * @param logs the collection of logs to be uploaded. * @return the {@link Mono} that completes on completion of the upload request. * @throws NullPointerException if any of {@code ruleId}, {@code streamName} or {@code logs} are null. * @throws IllegalArgumentException if {@code logs} is empty. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> upload(String ruleId, String streamName, Iterable<Object> logs) { return upload(ruleId, streamName, logs, new UploadLogsOptions()); } /** * Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be * too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split * the input logs into multiple smaller requests before sending to the service. * * <p><strong>Upload logs to Azure Monitor</strong></p> * <!-- src_embed com.azure.monitor.ingestion.LogsIngestionAsyncClient.uploadWithConcurrency --> * <pre> * List&lt;Object&gt; logs = getLogs& * UploadLogsOptions uploadLogsOptions = new UploadLogsOptions& * logsIngestionAsyncClient.upload& * .subscribe& * </pre> * <!-- end com.azure.monitor.ingestion.LogsIngestionAsyncClient.uploadWithConcurrency --> * * @param ruleId the data collection rule id that is configured to collect and transform the logs. * @param streamName the stream name configured in data collection rule that matches defines the structure of the * logs sent in this request. * @param logs the collection of logs to be uploaded. * @param options the options to configure the upload request. * @return the {@link Mono} that completes on completion of the upload request. * @throws NullPointerException if any of {@code ruleId}, {@code streamName} or {@code logs} are null. * @throws IllegalArgumentException if {@code logs} is empty. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> upload(String ruleId, String streamName, Iterable<Object> logs, UploadLogsOptions options) { return withContext(context -> upload(ruleId, streamName, logs, options, context)); } /** * See error response code and error response message for more detail. 
* * <p><strong>Header Parameters</strong> * * <table border="1"> * <caption>Header Parameters</caption> * <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr> * <tr><td>Content-Encoding</td><td>String</td><td>No</td><td>gzip</td></tr> * <tr><td>x-ms-client-request-id</td><td>String</td><td>No</td><td>Client request Id</td></tr> * </table> * * <p><strong>Request Body Schema</strong> * * <pre>{@code * [ * Object * ] * }</pre> * * @param ruleId The immutable Id of the Data Collection Rule resource. * @param streamName The streamDeclaration name as defined in the Data Collection Rule. * @param logs An array of objects matching the schema defined by the provided stream. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @return the {@link Response} on successful completion of {@link Mono}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> uploadWithResponse( String ruleId, String streamName, BinaryData logs, RequestOptions requestOptions) { Objects.requireNonNull(ruleId, "'ruleId' cannot be null."); Objects.requireNonNull(streamName, "'streamName' cannot be null."); Objects.requireNonNull(logs, "'logs' cannot be null."); if (requestOptions == null) { requestOptions = new RequestOptions(); } requestOptions.addRequestCallback(request -> { HttpHeader httpHeader = request.getHeaders().get(CONTENT_ENCODING); if (httpHeader == null) { BinaryData gzippedRequest = BinaryData.fromBytes(gzipRequest(logs.toBytes())); request.setBody(gzippedRequest); request.setHeader(CONTENT_ENCODING, GZIP); } }); return service.uploadWithResponse(ruleId, streamName, logs, requestOptions); } Mono<Void> upload(String ruleId, String streamName, Iterable<Object> logs, UploadLogsOptions options, Context context) { return Mono.defer(() -> splitAndUpload(ruleId, streamName, logs, options, context)); } /** * This method splits the input logs into < 1MB HTTP requests and uploads to the Azure Monitor service. * @param ruleId The data collection rule id. * @param streamName The stream name configured in the data collection rule. * @param logs The input logs to upload. * @param options The options to configure the upload request. * @param context additional context that is passed through the Http pipeline during the service call. If no * additional context is required, pass {@link Context * @return the {@link Mono} that completes on completion of the upload request. 
*/ private Mono<Void> splitAndUpload(String ruleId, String streamName, Iterable<Object> logs, UploadLogsOptions options, Context context) { int concurrency = 1; ObjectSerializer objectSerializer = DEFAULT_SERIALIZER; if (options != null) { if (options.getObjectSerializer() != null) { objectSerializer = options.getObjectSerializer(); } if (options.getMaxConcurrency() != null) { concurrency = options.getMaxConcurrency(); } } ObjectSerializer serializer = objectSerializer; Iterator<Object> iterator = logs.iterator(); return Flux.<LogsIngestionRequest>create(emitter -> createHttpRequest(serializer, iterator, emitter)) .flatMapSequential(request -> uploadToService(ruleId, streamName, context, request), concurrency) .<UploadLogsException>handle((responseHolder, sink) -> processResponse(options, responseHolder, sink)) .collectList() .handle((result, sink) -> processExceptions(result, sink)); } /** * This method converts raw {@link Object} logs into serialized and gzipped byte array that forms the request * body of the service call. * @param serializer The serializer used to serialize logs. * @param iterator The input logs interator. * @param emitter The Flux emitter to which the HTTP request is emitted. 
*/ private void processExceptions(List<UploadLogsException> result, SynchronousSink<Void> sink) { long failedLogsCount = 0L; List<HttpResponseException> exceptions = new ArrayList<>(); for (UploadLogsException exception : result) { exceptions.addAll(exception.getUploadLogsErrors()); failedLogsCount += exception.getFailedLogsCount(); } if (!exceptions.isEmpty()) { sink.error(new UploadLogsException(exceptions, failedLogsCount)); } else { sink.complete(); } } private void processResponse(UploadLogsOptions options, UploadLogsResponseHolder responseHolder, SynchronousSink<UploadLogsException> sink) { if (responseHolder.getException() != null) { Consumer<UploadLogsError> uploadLogsErrorConsumer = null; if (options != null) { uploadLogsErrorConsumer = options.getUploadLogsErrorConsumer(); } if (uploadLogsErrorConsumer != null) { uploadLogsErrorConsumer.accept(new UploadLogsError(responseHolder.getException(), responseHolder.getRequest().getLogs())); return; } sink.next(new UploadLogsException(Collections.singletonList(responseHolder.getException()), responseHolder.getRequest().getLogs().size())); } } private Mono<UploadLogsResponseHolder> uploadToService(String ruleId, String streamName, Context context, LogsIngestionRequest request) { RequestOptions requestOptions = new RequestOptions() .addHeader(CONTENT_ENCODING, GZIP) .setContext(context); return service.uploadWithResponse(ruleId, streamName, BinaryData.fromBytes(request.getRequestBody()), requestOptions) .map(response -> new UploadLogsResponseHolder(null, null)) .onErrorResume(HttpResponseException.class, ex -> Mono.fromSupplier(() -> new UploadLogsResponseHolder(request, ex))); } private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException { generator.writeRaw(serializedLogs.stream() .collect(Collectors.joining(","))); generator.writeEndArray(); generator.close(); } /** * Gzips the input byte array. * @param bytes The input byte array. 
* @return gzipped byte array. */ private byte[] gzipRequest(byte[] bytes) { ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); try (GZIPOutputStream zip = new GZIPOutputStream(byteArrayOutputStream)) { zip.write(bytes); } catch (IOException exception) { throw LOGGER.logExceptionAsError(new UncheckedIOException(exception)); } return byteArrayOutputStream.toByteArray(); } }
nit (and not-blocking): would be nice to add an example for async error subscriber callback and error callback in upload options to demonstrate behavior in this case.
/**
 * Builds an async Logs Ingestion client, uploads the sample logs, and blocks (up to
 * {@code TIMEOUT}) until the asynchronous upload terminates.
 *
 * @throws InterruptedException if the wait on the latch is interrupted.
 */
private void run() throws InterruptedException {
    LogsIngestionAsyncClient client = new LogsIngestionClientBuilder()
        .endpoint("<data-collection-endpoint>")
        .credential(new DefaultAzureCredentialBuilder().build())
        .buildAsyncClient();
    // Latch used to block the main thread until the async subscription terminates.
    CountDownLatch countdownLatch = new CountDownLatch(1);
    List<Object> dataList = getLogs();
    Mono<Void> resultMono = client.upload("<data-collection-rule-id>", "<stream-name>", dataList);
    resultMono.subscribe(
        ignored -> { },
        error -> {
            if (error instanceof UploadLogsException) {
                UploadLogsException ex = (UploadLogsException) error;
                // Fixed message spacing: previously printed e.g. "Failed to upload 5logs."
                System.out.println("Failed to upload " + ex.getFailedLogsCount() + " logs.");
            }
            // Release the latch on error as well; otherwise a failed upload made the sample
            // always wait the full TIMEOUT before exiting.
            countdownLatch.countDown();
        },
        countdownLatch::countDown);
    countdownLatch.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS);
}
Mono<Void> resultMono = client.upload("<data-collection-rule-id>",
/**
 * Builds an async Logs Ingestion client, uploads the sample logs, and waits (up to TIMEOUT)
 * for the asynchronous upload to complete before returning.
 *
 * @throws InterruptedException if the wait on the latch is interrupted.
 */
private void run() throws InterruptedException {
    LogsIngestionAsyncClient client = new LogsIngestionClientBuilder()
        .endpoint("<data-collection-endpoint>")
        .credential(new DefaultAzureCredentialBuilder().build())
        .buildAsyncClient();
    // Latch used to block the main thread until the async subscription completes.
    CountDownLatch countdownLatch = new CountDownLatch(1);
    List<Object> dataList = getLogs();
    Mono<Void> resultMono = client.upload("<data-collection-rule-id>", "<stream-name>", dataList);
    resultMono.subscribe(
        ignored -> { },
        // Error callback: an UploadLogsException carries the count of logs that failed.
        // NOTE(review): the latch is only counted down on completion; on error the await
        // below relies on the TIMEOUT expiring — confirm this is intended.
        error -> {
            if (error instanceof UploadLogsException) {
                UploadLogsException ex = (UploadLogsException) error;
                System.out.println("Failed to upload " + ex.getFailedLogsCount() + "logs.");
            }
        },
        // Completion callback: release the latch so the sample can exit.
        countdownLatch::countDown);
    countdownLatch.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS);
}
/**
 * Sample demonstrating how to upload logs to Azure Monitor using the asynchronous client.
 */
class UploadLogsAsyncClientSample {
    // Maximum time the sample waits for the asynchronous upload to finish.
    private static final Duration TIMEOUT = Duration.ofSeconds(10);

    /**
     * Main method to run the sample.
     * @param args ignore args.
     */
    public static void main(String[] args) throws InterruptedException {
        new UploadLogsAsyncClientSample().run();
    }

    /**
     * Builds a small batch of sample log entries to upload.
     *
     * @return ten {@link CustomLogData} entries stamped with the current time.
     */
    private static List<Object> getLogs() {
        List<Object> logs = new ArrayList<>();
        int count = 0;
        while (count < 10) {
            logs.add(new CustomLogData()
                .setTime(OffsetDateTime.now())
                .setExtendedColumn("extend column data" + count));
            count++;
        }
        return logs;
    }
}
/**
 * Sample demonstrating how to upload logs to Azure Monitor using the asynchronous client.
 */
class UploadLogsAsyncClientSample {
    // Maximum time the sample waits for the asynchronous upload to finish.
    private static final Duration TIMEOUT = Duration.ofSeconds(10);

    /**
     * Main method to run the sample.
     * @param args ignore args.
     */
    public static void main(String[] args) throws InterruptedException {
        UploadLogsAsyncClientSample sample = new UploadLogsAsyncClientSample();
        sample.run();
    }

    /**
     * Builds a small batch of sample log entries to upload.
     *
     * @return ten {@link CustomLogData} entries stamped with the current time.
     */
    private static List<Object> getLogs() {
        List<Object> logs = new ArrayList<>();
        for (int i = 0; i < 10; i++) {
            CustomLogData e = new CustomLogData()
                .setTime(OffsetDateTime.now())
                .setExtendedColumn("extend column data" + i);
            logs.add(e);
        }
        return logs;
    }
}
Yes, concurrency only affects the downstream `flatMapSequential` call; `Flux.create()` itself always runs sequentially.
/**
 * Serializes the input logs and batches them into gzipped HTTP request payloads of at most
 * {@code MAX_REQUEST_PAYLOAD_SIZE} (uncompressed) bytes each, emitting one
 * {@link LogsIngestionRequest} per batch to the given emitter.
 *
 * @param serializer the serializer used to convert each log object to JSON bytes.
 * @param iterator the iterator over the input logs.
 * @param emitter the sink to which batched requests are emitted; receives the error signal
 * if serialization fails.
 */
private void createHttpRequest(ObjectSerializer serializer, Iterator<Object> iterator,
    FluxSink<LogsIngestionRequest> emitter) {
    try {
        long currentBatchSize = 0;
        ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
        JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
        generator.writeStartArray();
        List<String> serializedLogs = new ArrayList<>();
        List<Object> originalLogsRequest = new ArrayList<>();
        while (iterator.hasNext()) {
            Object currentLog = iterator.next();
            byte[] bytes = serializer.serializeToBytes(currentLog);
            int currentLogSize = bytes.length;
            currentBatchSize += currentLogSize;
            // Flush the accumulated batch before adding the current log when adding it would
            // exceed the payload cap. Skip the flush while the batch is empty (i.e. a single
            // log is itself larger than the cap); previously this emitted an empty "[]"
            // request with an empty originalLogsRequest.
            if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE && !serializedLogs.isEmpty()) {
                writeLogsAndCloseJsonGenerator(generator, serializedLogs);
                byte[] zippedRequestBody = gzipRequest(byteArrayOutputStream.toByteArray());
                emitter.next(new LogsIngestionRequest(originalLogsRequest, zippedRequestBody));
                // Reset state for the next batch; the current (not yet added) log starts it.
                byteArrayOutputStream = new ByteArrayOutputStream();
                generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
                generator.writeStartArray();
                currentBatchSize = currentLogSize;
                originalLogsRequest = new ArrayList<>();
                serializedLogs.clear();
            }
            serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
            originalLogsRequest.add(currentLog);
        }
        // Emit the final, partially-filled batch if any logs remain.
        if (currentBatchSize > 0) {
            writeLogsAndCloseJsonGenerator(generator, serializedLogs);
            byte[] zippedRequestBody = gzipRequest(byteArrayOutputStream.toByteArray());
            emitter.next(new LogsIngestionRequest(originalLogsRequest, zippedRequestBody));
        }
        emitter.complete();
    } catch (IOException e) {
        emitter.error(e);
    }
}
while (iterator.hasNext()) {
/**
 * Serializes the input logs and batches them into gzipped HTTP request payloads of at most
 * {@code MAX_REQUEST_PAYLOAD_SIZE} (uncompressed) bytes each, emitting one
 * {@link LogsIngestionRequest} per batch to the given emitter.
 *
 * @param serializer the serializer used to convert each log object to JSON bytes.
 * @param iterator the iterator over the input logs.
 * @param emitter the sink to which batched requests are emitted; receives the error signal
 * if serialization fails.
 */
private void createHttpRequest(ObjectSerializer serializer, Iterator<Object> iterator,
    FluxSink<LogsIngestionRequest> emitter) {
    try {
        long currentBatchSize = 0;
        ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
        JsonGenerator generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
        generator.writeStartArray();
        List<String> serializedLogs = new ArrayList<>();
        List<Object> originalLogsRequest = new ArrayList<>();
        while (iterator.hasNext()) {
            Object currentLog = iterator.next();
            byte[] bytes = serializer.serializeToBytes(currentLog);
            int currentLogSize = bytes.length;
            currentBatchSize += currentLogSize;
            // Flush the accumulated batch before adding the current log when adding it would
            // exceed the payload cap. Skip the flush while the batch is empty (i.e. a single
            // log is itself larger than the cap); previously this emitted an empty "[]"
            // request with an empty originalLogsRequest.
            if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE && !serializedLogs.isEmpty()) {
                writeLogsAndCloseJsonGenerator(generator, serializedLogs);
                byte[] zippedRequestBody = gzipRequest(byteArrayOutputStream.toByteArray());
                emitter.next(new LogsIngestionRequest(originalLogsRequest, zippedRequestBody));
                // Reset state for the next batch; the current (not yet added) log starts it.
                byteArrayOutputStream = new ByteArrayOutputStream();
                generator = JsonFactory.builder().build().createGenerator(byteArrayOutputStream);
                generator.writeStartArray();
                currentBatchSize = currentLogSize;
                originalLogsRequest = new ArrayList<>();
                serializedLogs.clear();
            }
            serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
            originalLogsRequest.add(currentLog);
        }
        // Emit the final, partially-filled batch if any logs remain.
        if (currentBatchSize > 0) {
            writeLogsAndCloseJsonGenerator(generator, serializedLogs);
            byte[] zippedRequestBody = gzipRequest(byteArrayOutputStream.toByteArray());
            emitter.next(new LogsIngestionRequest(originalLogsRequest, zippedRequestBody));
        }
        emitter.complete();
    } catch (IOException e) {
        emitter.error(e);
    }
}
class LogsIngestionAsyncClient { private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class); private static final String CONTENT_ENCODING = "Content-Encoding"; private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024; private static final String GZIP = "gzip"; private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true); private final IngestionUsingDataCollectionRulesAsyncClient service; LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) { this.service = service; } /** * Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be * too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split * the input logs into multiple smaller requests before sending to the service. * * <p><strong>Upload logs to Azure Monitor</strong></p> * <!-- src_embed com.azure.monitor.ingestion.LogsIngestionAsyncClient.upload --> * <pre> * List&lt;Object&gt; logs = getLogs& * logsIngestionAsyncClient.upload& * .subscribe& * </pre> * <!-- end com.azure.monitor.ingestion.LogsIngestionAsyncClient.upload --> * * @param ruleId the data collection rule id that is configured to collect and transform the logs. * @param streamName the stream name configured in data collection rule that matches defines the structure of the * logs sent in this request. * @param logs the collection of logs to be uploaded. * @return the {@link Mono} that completes on completion of the upload request. * @throws NullPointerException if any of {@code ruleId}, {@code streamName} or {@code logs} are null. * @throws IllegalArgumentException if {@code logs} is empty. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> upload(String ruleId, String streamName, Iterable<Object> logs) { return upload(ruleId, streamName, logs, new UploadLogsOptions()); } /** * Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be * too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split * the input logs into multiple smaller requests before sending to the service. * * <p><strong>Upload logs to Azure Monitor</strong></p> * <!-- src_embed com.azure.monitor.ingestion.LogsIngestionAsyncClient.uploadWithConcurrency --> * <pre> * List&lt;Object&gt; logs = getLogs& * UploadLogsOptions uploadLogsOptions = new UploadLogsOptions& * logsIngestionAsyncClient.upload& * .subscribe& * </pre> * <!-- end com.azure.monitor.ingestion.LogsIngestionAsyncClient.uploadWithConcurrency --> * * @param ruleId the data collection rule id that is configured to collect and transform the logs. * @param streamName the stream name configured in data collection rule that matches defines the structure of the * logs sent in this request. * @param logs the collection of logs to be uploaded. * @param options the options to configure the upload request. * @return the {@link Mono} that completes on completion of the upload request. * @throws NullPointerException if any of {@code ruleId}, {@code streamName} or {@code logs} are null. * @throws IllegalArgumentException if {@code logs} is empty. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> upload(String ruleId, String streamName, Iterable<Object> logs, UploadLogsOptions options) { return withContext(context -> upload(ruleId, streamName, logs, options, context)); } /** * See error response code and error response message for more detail. 
* * <p><strong>Header Parameters</strong> * * <table border="1"> * <caption>Header Parameters</caption> * <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr> * <tr><td>Content-Encoding</td><td>String</td><td>No</td><td>gzip</td></tr> * <tr><td>x-ms-client-request-id</td><td>String</td><td>No</td><td>Client request Id</td></tr> * </table> * * <p><strong>Request Body Schema</strong> * * <pre>{@code * [ * Object * ] * }</pre> * * @param ruleId The immutable Id of the Data Collection Rule resource. * @param streamName The streamDeclaration name as defined in the Data Collection Rule. * @param logs An array of objects matching the schema defined by the provided stream. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @return the {@link Response} on successful completion of {@link Mono}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> uploadWithResponse( String ruleId, String streamName, BinaryData logs, RequestOptions requestOptions) { Objects.requireNonNull(ruleId, "'ruleId' cannot be null."); Objects.requireNonNull(streamName, "'streamName' cannot be null."); Objects.requireNonNull(logs, "'logs' cannot be null."); if (requestOptions == null) { requestOptions = new RequestOptions(); } requestOptions.addRequestCallback(request -> { HttpHeader httpHeader = request.getHeaders().get(CONTENT_ENCODING); if (httpHeader == null) { BinaryData gzippedRequest = BinaryData.fromBytes(gzipRequest(logs.toBytes())); request.setBody(gzippedRequest); request.setHeader(CONTENT_ENCODING, GZIP); } }); return service.uploadWithResponse(ruleId, streamName, logs, requestOptions); } Mono<Void> upload(String ruleId, String streamName, Iterable<Object> logs, UploadLogsOptions options, Context context) { return Mono.defer(() -> splitAndUpload(ruleId, streamName, logs, options, context)); } /** * This method splits the input logs into < 1MB HTTP requests and uploads to the Azure Monitor service. * @param ruleId The data collection rule id. * @param streamName The stream name configured in the data collection rule. * @param logs The input logs to upload. * @param options The options to configure the upload request. * @param context additional context that is passed through the Http pipeline during the service call. If no * additional context is required, pass {@link Context * @return the {@link Mono} that completes on completion of the upload request. 
*/ private Mono<Void> splitAndUpload(String ruleId, String streamName, Iterable<Object> logs, UploadLogsOptions options, Context context) { int concurrency = 1; ObjectSerializer objectSerializer = DEFAULT_SERIALIZER; if (options != null) { if (options.getObjectSerializer() != null) { objectSerializer = options.getObjectSerializer(); } if (options.getMaxConcurrency() != null) { concurrency = options.getMaxConcurrency(); } } ObjectSerializer serializer = objectSerializer; Iterator<Object> iterator = logs.iterator(); return Flux.<LogsIngestionRequest>create(emitter -> createHttpRequest(serializer, iterator, emitter)) .flatMapSequential(request -> uploadToService(ruleId, streamName, context, request), concurrency) .<UploadLogsException>handle((responseHolder, sink) -> processResponse(options, responseHolder, sink)) .collectList() .handle((result, sink) -> processExceptions(result, sink)); } /** * This method converts raw {@link Object} logs into serialized and gzipped byte array that forms the request * body of the service call. * @param serializer The serializer used to serialize logs. * @param iterator The input logs interator. * @param emitter The Flux emitter to which the HTTP request is emitted. 
*/ private void processExceptions(List<UploadLogsException> result, SynchronousSink<Void> sink) { long failedLogsCount = 0L; List<HttpResponseException> exceptions = new ArrayList<>(); for (UploadLogsException exception : result) { exceptions.addAll(exception.getUploadLogsErrors()); failedLogsCount += exception.getFailedLogsCount(); } if (!exceptions.isEmpty()) { sink.error(new UploadLogsException(exceptions, failedLogsCount)); } else { sink.complete(); } } private void processResponse(UploadLogsOptions options, UploadLogsResponseHolder responseHolder, SynchronousSink<UploadLogsException> sink) { if (responseHolder.getException() != null) { Consumer<UploadLogsError> uploadLogsErrorConsumer = null; if (options != null) { uploadLogsErrorConsumer = options.getUploadLogsErrorConsumer(); } if (uploadLogsErrorConsumer != null) { uploadLogsErrorConsumer.accept(new UploadLogsError(responseHolder.getException(), responseHolder.getRequest().getLogs())); return; } sink.next(new UploadLogsException(Collections.singletonList(responseHolder.getException()), responseHolder.getRequest().getLogs().size())); } } private Mono<UploadLogsResponseHolder> uploadToService(String ruleId, String streamName, Context context, LogsIngestionRequest request) { RequestOptions requestOptions = new RequestOptions() .addHeader(CONTENT_ENCODING, GZIP) .setContext(context); return service.uploadWithResponse(ruleId, streamName, BinaryData.fromBytes(request.getRequestBody()), requestOptions) .map(response -> new UploadLogsResponseHolder(null, null)) .onErrorResume(HttpResponseException.class, ex -> Mono.fromSupplier(() -> new UploadLogsResponseHolder(request, ex))); } private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException { generator.writeRaw(serializedLogs.stream() .collect(Collectors.joining(","))); generator.writeEndArray(); generator.close(); } /** * Gzips the input byte array. * @param bytes The input byte array. 
* @return gzipped byte array. */ private byte[] gzipRequest(byte[] bytes) { ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); try (GZIPOutputStream zip = new GZIPOutputStream(byteArrayOutputStream)) { zip.write(bytes); } catch (IOException exception) { throw LOGGER.logExceptionAsError(new UncheckedIOException(exception)); } return byteArrayOutputStream.toByteArray(); } }
class LogsIngestionAsyncClient { private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionAsyncClient.class); private static final String CONTENT_ENCODING = "Content-Encoding"; private static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024; private static final String GZIP = "gzip"; private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true); private final IngestionUsingDataCollectionRulesAsyncClient service; LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) { this.service = service; } /** * Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be * too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split * the input logs into multiple smaller requests before sending to the service. * * <p><strong>Upload logs to Azure Monitor</strong></p> * <!-- src_embed com.azure.monitor.ingestion.LogsIngestionAsyncClient.upload --> * <pre> * List&lt;Object&gt; logs = getLogs& * logsIngestionAsyncClient.upload& * .subscribe& * </pre> * <!-- end com.azure.monitor.ingestion.LogsIngestionAsyncClient.upload --> * * @param ruleId the data collection rule id that is configured to collect and transform the logs. * @param streamName the stream name configured in data collection rule that matches defines the structure of the * logs sent in this request. * @param logs the collection of logs to be uploaded. * @return the {@link Mono} that completes on completion of the upload request. * @throws NullPointerException if any of {@code ruleId}, {@code streamName} or {@code logs} are null. * @throws IllegalArgumentException if {@code logs} is empty. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> upload(String ruleId, String streamName, Iterable<Object> logs) { return upload(ruleId, streamName, logs, new UploadLogsOptions()); } /** * Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be * too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split * the input logs into multiple smaller requests before sending to the service. * * <p><strong>Upload logs to Azure Monitor</strong></p> * <!-- src_embed com.azure.monitor.ingestion.LogsIngestionAsyncClient.uploadWithConcurrency --> * <pre> * List&lt;Object&gt; logs = getLogs& * UploadLogsOptions uploadLogsOptions = new UploadLogsOptions& * logsIngestionAsyncClient.upload& * .subscribe& * </pre> * <!-- end com.azure.monitor.ingestion.LogsIngestionAsyncClient.uploadWithConcurrency --> * * @param ruleId the data collection rule id that is configured to collect and transform the logs. * @param streamName the stream name configured in data collection rule that matches defines the structure of the * logs sent in this request. * @param logs the collection of logs to be uploaded. * @param options the options to configure the upload request. * @return the {@link Mono} that completes on completion of the upload request. * @throws NullPointerException if any of {@code ruleId}, {@code streamName} or {@code logs} are null. * @throws IllegalArgumentException if {@code logs} is empty. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> upload(String ruleId, String streamName, Iterable<Object> logs, UploadLogsOptions options) { return withContext(context -> upload(ruleId, streamName, logs, options, context)); } /** * See error response code and error response message for more detail. 
* * <p><strong>Header Parameters</strong> * * <table border="1"> * <caption>Header Parameters</caption> * <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr> * <tr><td>Content-Encoding</td><td>String</td><td>No</td><td>gzip</td></tr> * <tr><td>x-ms-client-request-id</td><td>String</td><td>No</td><td>Client request Id</td></tr> * </table> * * <p><strong>Request Body Schema</strong> * * <pre>{@code * [ * Object * ] * }</pre> * * @param ruleId The immutable Id of the Data Collection Rule resource. * @param streamName The streamDeclaration name as defined in the Data Collection Rule. * @param logs An array of objects matching the schema defined by the provided stream. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @return the {@link Response} on successful completion of {@link Mono}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> uploadWithResponse( String ruleId, String streamName, BinaryData logs, RequestOptions requestOptions) { Objects.requireNonNull(ruleId, "'ruleId' cannot be null."); Objects.requireNonNull(streamName, "'streamName' cannot be null."); Objects.requireNonNull(logs, "'logs' cannot be null."); if (requestOptions == null) { requestOptions = new RequestOptions(); } requestOptions.addRequestCallback(request -> { HttpHeader httpHeader = request.getHeaders().get(CONTENT_ENCODING); if (httpHeader == null) { BinaryData gzippedRequest = BinaryData.fromBytes(gzipRequest(logs.toBytes())); request.setBody(gzippedRequest); request.setHeader(CONTENT_ENCODING, GZIP); } }); return service.uploadWithResponse(ruleId, streamName, logs, requestOptions); } Mono<Void> upload(String ruleId, String streamName, Iterable<Object> logs, UploadLogsOptions options, Context context) { return Mono.defer(() -> splitAndUpload(ruleId, streamName, logs, options, context)); } /** * This method splits the input logs into < 1MB HTTP requests and uploads to the Azure Monitor service. * @param ruleId The data collection rule id. * @param streamName The stream name configured in the data collection rule. * @param logs The input logs to upload. * @param options The options to configure the upload request. * @param context additional context that is passed through the Http pipeline during the service call. If no * additional context is required, pass {@link Context * @return the {@link Mono} that completes on completion of the upload request. 
*/ private Mono<Void> splitAndUpload(String ruleId, String streamName, Iterable<Object> logs, UploadLogsOptions options, Context context) { int concurrency = 1; ObjectSerializer objectSerializer = DEFAULT_SERIALIZER; if (options != null) { if (options.getObjectSerializer() != null) { objectSerializer = options.getObjectSerializer(); } if (options.getMaxConcurrency() != null) { concurrency = options.getMaxConcurrency(); } } ObjectSerializer serializer = objectSerializer; Iterator<Object> iterator = logs.iterator(); return Flux.<LogsIngestionRequest>create(emitter -> createHttpRequest(serializer, iterator, emitter)) .flatMapSequential(request -> uploadToService(ruleId, streamName, context, request), concurrency) .<UploadLogsException>handle((responseHolder, sink) -> processResponse(options, responseHolder, sink)) .collectList() .handle((result, sink) -> processExceptions(result, sink)); } /** * This method converts raw {@link Object} logs into serialized and gzipped byte array that forms the request * body of the service call. * @param serializer The serializer used to serialize logs. * @param iterator The input logs interator. * @param emitter The Flux emitter to which the HTTP request is emitted. 
*/ private void processExceptions(List<UploadLogsException> result, SynchronousSink<Void> sink) { long failedLogsCount = 0L; List<HttpResponseException> exceptions = new ArrayList<>(); for (UploadLogsException exception : result) { exceptions.addAll(exception.getUploadLogsErrors()); failedLogsCount += exception.getFailedLogsCount(); } if (!exceptions.isEmpty()) { sink.error(new UploadLogsException(exceptions, failedLogsCount)); } else { sink.complete(); } } private void processResponse(UploadLogsOptions options, UploadLogsResponseHolder responseHolder, SynchronousSink<UploadLogsException> sink) { if (responseHolder.getException() != null) { Consumer<UploadLogsError> uploadLogsErrorConsumer = null; if (options != null) { uploadLogsErrorConsumer = options.getUploadLogsErrorConsumer(); } if (uploadLogsErrorConsumer != null) { uploadLogsErrorConsumer.accept(new UploadLogsError(responseHolder.getException(), responseHolder.getRequest().getLogs())); return; } sink.next(new UploadLogsException(Collections.singletonList(responseHolder.getException()), responseHolder.getRequest().getLogs().size())); } } private Mono<UploadLogsResponseHolder> uploadToService(String ruleId, String streamName, Context context, LogsIngestionRequest request) { RequestOptions requestOptions = new RequestOptions() .addHeader(CONTENT_ENCODING, GZIP) .setContext(context); return service.uploadWithResponse(ruleId, streamName, BinaryData.fromBytes(request.getRequestBody()), requestOptions) .map(response -> new UploadLogsResponseHolder(null, null)) .onErrorResume(HttpResponseException.class, ex -> Mono.fromSupplier(() -> new UploadLogsResponseHolder(request, ex))); } private void writeLogsAndCloseJsonGenerator(JsonGenerator generator, List<String> serializedLogs) throws IOException { generator.writeRaw(serializedLogs.stream() .collect(Collectors.joining(","))); generator.writeEndArray(); generator.close(); } /** * Gzips the input byte array. * @param bytes The input byte array. 
* @return gzipped byte array. */ private byte[] gzipRequest(byte[] bytes) { ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); try (GZIPOutputStream zip = new GZIPOutputStream(byteArrayOutputStream)) { zip.write(bytes); } catch (IOException exception) { throw LOGGER.logExceptionAsError(new UncheckedIOException(exception)); } return byteArrayOutputStream.toByteArray(); } }
Added the sample.
/**
 * Builds an async Logs Ingestion client, uploads the sample logs, and waits (up to TIMEOUT)
 * for the asynchronous upload to complete before returning.
 *
 * @throws InterruptedException if the wait on the latch is interrupted.
 */
private void run() throws InterruptedException {
    LogsIngestionAsyncClient client = new LogsIngestionClientBuilder()
        .endpoint("<data-collection-endpoint>")
        .credential(new DefaultAzureCredentialBuilder().build())
        .buildAsyncClient();
    // Latch used to block the main thread until the async subscription completes.
    CountDownLatch countdownLatch = new CountDownLatch(1);
    List<Object> dataList = getLogs();
    Mono<Void> resultMono = client.upload("<data-collection-rule-id>", "<stream-name>", dataList);
    resultMono.subscribe(
        ignored -> { },
        // Error callback: an UploadLogsException carries the count of logs that failed.
        // NOTE(review): the latch is only counted down on completion; on error the await
        // below relies on the TIMEOUT expiring — confirm this is intended.
        error -> {
            if (error instanceof UploadLogsException) {
                UploadLogsException ex = (UploadLogsException) error;
                System.out.println("Failed to upload " + ex.getFailedLogsCount() + "logs.");
            }
        },
        // Completion callback: release the latch so the sample can exit.
        countdownLatch::countDown);
    countdownLatch.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS);
}
Mono<Void> resultMono = client.upload("<data-collection-rule-id>",
/**
 * Builds an async Logs Ingestion client, uploads the sample logs, and waits (up to TIMEOUT)
 * for the asynchronous upload to complete before returning.
 *
 * @throws InterruptedException if the wait on the latch is interrupted.
 */
private void run() throws InterruptedException {
    LogsIngestionAsyncClient client = new LogsIngestionClientBuilder()
        .endpoint("<data-collection-endpoint>")
        .credential(new DefaultAzureCredentialBuilder().build())
        .buildAsyncClient();
    // Latch used to block the main thread until the async subscription completes.
    CountDownLatch countdownLatch = new CountDownLatch(1);
    List<Object> dataList = getLogs();
    Mono<Void> resultMono = client.upload("<data-collection-rule-id>", "<stream-name>", dataList);
    resultMono.subscribe(
        ignored -> { },
        // Error callback: an UploadLogsException carries the count of logs that failed.
        // NOTE(review): the latch is only counted down on completion; on error the await
        // below relies on the TIMEOUT expiring — confirm this is intended.
        error -> {
            if (error instanceof UploadLogsException) {
                UploadLogsException ex = (UploadLogsException) error;
                System.out.println("Failed to upload " + ex.getFailedLogsCount() + "logs.");
            }
        },
        // Completion callback: release the latch so the sample can exit.
        countdownLatch::countDown);
    countdownLatch.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS);
}
/**
 * Sample demonstrating how to upload logs to Azure Monitor using the asynchronous client.
 */
class UploadLogsAsyncClientSample {
    // Maximum time the sample waits for the asynchronous upload to finish.
    private static final Duration TIMEOUT = Duration.ofSeconds(10);

    /**
     * Main method to run the sample.
     * @param args ignore args.
     */
    public static void main(String[] args) throws InterruptedException {
        UploadLogsAsyncClientSample sample = new UploadLogsAsyncClientSample();
        sample.run();
    }

    /**
     * Builds a small batch of sample log entries to upload.
     *
     * @return ten {@link CustomLogData} entries stamped with the current time.
     */
    private static List<Object> getLogs() {
        List<Object> logs = new ArrayList<>();
        for (int i = 0; i < 10; i++) {
            CustomLogData e = new CustomLogData()
                .setTime(OffsetDateTime.now())
                .setExtendedColumn("extend column data" + i);
            logs.add(e);
        }
        return logs;
    }
}
/**
 * Sample demonstrating how to upload logs to Azure Monitor using the asynchronous client.
 */
class UploadLogsAsyncClientSample {
    // Maximum time the sample waits for the asynchronous upload to finish.
    private static final Duration TIMEOUT = Duration.ofSeconds(10);

    /**
     * Main method to run the sample.
     * @param args ignore args.
     */
    public static void main(String[] args) throws InterruptedException {
        UploadLogsAsyncClientSample sample = new UploadLogsAsyncClientSample();
        sample.run();
    }

    /**
     * Builds a small batch of sample log entries to upload.
     *
     * @return ten {@link CustomLogData} entries stamped with the current time.
     */
    private static List<Object> getLogs() {
        List<Object> logs = new ArrayList<>();
        for (int i = 0; i < 10; i++) {
            CustomLogData e = new CustomLogData()
                .setTime(OffsetDateTime.now())
                .setExtendedColumn("extend column data" + i);
            logs.add(e);
        }
        return logs;
    }
}
IIUC, for the scenario where the broker detaches with an actual error condition (which LinkHandler maps to a client-side AmqpException), the SessionProcessor already has a mechanism to recover at a higher level. If that's the case, should we let that mechanism take care of recovery in this additional scenario as well? (We already mapped detach-without-error-condition to an AmqpException via the above switchIfEmpty.) Looking at the existing logic, the _retryWhen_ was originally dedicated to handling only the "timeout" case; the broker returning a timeout appears to be a special case for session-enabled links. So by changing this _retryWhen_ to handle ALL AmqpExceptions we are changing the current behavior — i.e., stopping exceptions that used to reach the higher-level recovery from triggering its logic (which may involve more than just re-requesting a link).
/**
 * Gets the active receive link, creating a new session receive link if one is not already
 * cached. Creation waits for the link to become ACTIVE (bounded by the operation timeout)
 * and retries only the timeout cases ({@link TimeoutException} and
 * {@link AmqpErrorCondition#TIMEOUT_ERROR}); any other failure is propagated so that
 * higher-level recovery (e.g. the session processor) can handle it, preserving the
 * pre-existing behavior instead of retrying every transient AmqpException here.
 *
 * @return a Mono that emits the active {@link ServiceBusReceiveLink}.
 */
Mono<ServiceBusReceiveLink> getActiveLink() {
    if (this.receiveLink != null) {
        return Mono.just(this.receiveLink);
    }
    return Mono.defer(() -> createSessionReceiveLink()
        .flatMap(link -> link.getEndpointStates()
            .filter(e -> e == AmqpEndpointState.ACTIVE)
            .next()
            // A link that completes without ever becoming active is surfaced as an error.
            .switchIfEmpty(Mono.error(() -> new AmqpException(true,
                "Session receive link completed without being active", null)))
            .timeout(operationTimeout)
            .then(Mono.just(link))))
        .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> {
            final Throwable failure = signal.failure();
            LOGGER.atInfo()
                .addKeyValue(ENTITY_PATH_KEY, entityPath)
                .addKeyValue("attempt", signal.totalRetriesInARow())
                .log("Error occurred while getting unnamed session.", failure);
            if (isDisposed.get()) {
                return Mono.<Long>error(new AmqpException(false,
                    "SessionManager is already disposed.", failure, getErrorContext()));
            } else if (failure instanceof TimeoutException) {
                // No session became available within the operation timeout; wait and retry.
                return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION);
            } else if (failure instanceof AmqpException
                && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) {
                // The broker reported a timeout error condition, expected for session-enabled
                // links when no session is available; wait and retry.
                return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION);
            } else {
                // Propagate all other failures so higher-level recovery can run.
                return Mono.<Long>error(failure);
            }
        })));
}
&& ((AmqpException) failure).isTransient()) {
/**
 * Gets the active receive link, creating a new session receive link if one is not already
 * cached. Retries are limited to the timeout cases; other failures are logged with a
 * tracking id and propagated so higher-level recovery can handle them.
 */
Mono<ServiceBusReceiveLink> getActiveLink() {
    if (this.receiveLink != null) {
        return Mono.just(this.receiveLink);
    }
    return Mono.defer(() -> createSessionReceiveLink()
        .flatMap(link -> link.getEndpointStates()
            .filter(e -> e == AmqpEndpointState.ACTIVE)
            .next()
            // A link that completes without ever becoming active is surfaced as an error.
            .switchIfEmpty(Mono.error(() -> new AmqpException(true,
                "Session receive link completed without being active", null)))
            .timeout(operationTimeout)
            .then(Mono.just(link))))
        .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> {
            final Throwable failure = signal.failure();
            LOGGER.atInfo()
                .addKeyValue(ENTITY_PATH_KEY, entityPath)
                .addKeyValue("attempt", signal.totalRetriesInARow())
                .log("Error occurred while getting unnamed session.", failure);
            if (isDisposed.get()) {
                return Mono.<Long>error(new AmqpException(false,
                    "SessionManager is already disposed.", failure, getErrorContext()));
            } else if (failure instanceof TimeoutException) {
                // No session became available within the operation timeout; wait and retry.
                return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION);
            } else if (failure instanceof AmqpException
                && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) {
                // The broker reported a timeout error condition; wait and retry.
                return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION);
            } else {
                // Any other failure is logged with a tracking id and re-emitted on a
                // boundedElastic scheduler so higher-level recovery can act on it.
                final long id = System.nanoTime();
                LOGGER.atInfo()
                    .addKeyValue(TRACKING_ID_KEY, id)
                    .log("Unable to acquire new session.", failure);
                return Mono.<Long>error(failure)
                    .publishOn(Schedulers.boundedElastic())
                    .doOnError(e -> LOGGER.atInfo()
                        .addKeyValue(TRACKING_ID_KEY, id)
                        .log("Emitting the error signal received for session acquire attempt.", e)
                    );
            }
        })));
}
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. */ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? 
receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. * * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. 
*/ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. 
* * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. */ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final String TRACKING_ID_KEY = "trackingId"; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. 
*/ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. 
* * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. */ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); 
sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. * * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. 
*/ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
thank you, this operator chain looks what we want!
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).isTransient()) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { return Mono.<Long>error(failure); } }))); }
new AmqpException(true, "Session receive link completed without being active", null)))
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { final long id = System.nanoTime(); LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Unable to acquire new session.", failure); return Mono.<Long>error(failure) .publishOn(Schedulers.boundedElastic()) .doOnError(e -> LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Emitting the error signal received for session acquire attempt.", e) ); } }))); }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. */ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? 
receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. * * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. 
*/ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. 
* * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. */ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final String TRACKING_ID_KEY = "trackingId"; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. 
*/ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. 
* * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. */ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); 
sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. * * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. 
*/ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
@anuchandy How often will this chain be called? I was thinking back to where we had that high memory usage because it was eagerly assembling and creating the exception.... Do we need to consider `Mono.defer(() -> Mono.error(...))`?
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).isTransient()) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { return Mono.<Long>error(failure); } }))); }
new AmqpException(true, "Session receive link completed without being active", null)))
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { final long id = System.nanoTime(); LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Unable to acquire new session.", failure); return Mono.<Long>error(failure) .publishOn(Schedulers.boundedElastic()) .doOnError(e -> LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Emitting the error signal received for session acquire attempt.", e) ); } }))); }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. */ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? 
receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. * * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. 
*/ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. 
* * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. */ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final String TRACKING_ID_KEY = "trackingId"; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. 
*/ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. 
* * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. */ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); 
sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. * * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. 
*/ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
Hi @conniey - Looking at the code, my understanding is: initially, there is going to be a max-concurrent-session number of instantiations of this chain in one shot. After that, this chain will be instantiated each time there is a session disconnect. That looks like a relatively low object-creation rate in typical cases, provided the broker won't disconnect that often. If not enough of the requested sessions are available in the broker, the corresponding session-acquire retry attempts are going to instantiate that many chains concurrently. I think, in the past, the high memory consumption occurred when we were using "Mono.just(item)" and "Mono.error(new Error)". At that time we replaced "Mono.just(item)" with its delayed version, "Mono.defer(() -> just(item))" — see the PR [reference](https://github.com/Azure/azure-sdk-for-java/pull/26373). The "Mono.just()" became a bottleneck when the creation rate was ~1000 objects per second (while sending ~1000 events/sec). In this case, we use "Mono.error(() -> new Error())", which is the delayed version of "Mono.error(new Error())" and should be equivalent to what you mentioned, "Mono.defer(() -> Mono.error(...))".
/**
 * Gets an active session receive link.
 * <p>
 * If a link was supplied at construction time ({@code this.receiveLink}), it is returned as-is. Otherwise, a new
 * session receive link is created lazily inside {@code Mono.defer} so that every subscription — including each
 * {@code retryWhen} resubscription — provisions a fresh link. The link is only emitted once its endpoint reports
 * {@link AmqpEndpointState#ACTIVE}; waiting for that state is bounded by {@code operationTimeout}.
 *
 * @return A Mono that completes with an active session receive link, or errors if the manager is disposed or a
 *     non-retriable failure occurs.
 */
Mono<ServiceBusReceiveLink> getActiveLink() {
    if (this.receiveLink != null) {
        // A pre-established link was handed to this manager at construction; reuse it.
        return Mono.just(this.receiveLink);
    }
    // defer(...) makes link creation per-subscription, so retryWhen re-creates the link on each attempt.
    return Mono.defer(() -> createSessionReceiveLink()
        .flatMap(link -> link.getEndpointStates()
            .filter(e -> e == AmqpEndpointState.ACTIVE)
            .next()
            // Lazy error supplier: the AmqpException is constructed only if the endpoint state stream
            // completes without ever becoming ACTIVE. isTransient = true, so the retry logic below applies.
            .switchIfEmpty(Mono.error(() -> new AmqpException(true,
                "Session receive link completed without being active", null)))
            .timeout(operationTimeout)
            .then(Mono.just(link))))
        .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> {
            final Throwable failure = signal.failure();
            LOGGER.atInfo()
                .addKeyValue(ENTITY_PATH_KEY, entityPath)
                .addKeyValue("attempt", signal.totalRetriesInARow())
                .log("Error occurred while getting unnamed session.", failure);
            if (isDisposed.get()) {
                // Manager was shut down while retrying; surface a terminal (non-transient) error.
                return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure,
                    getErrorContext()));
            } else if (failure instanceof TimeoutException) {
                // No session became available in time; sleep before the next accept-session attempt.
                return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION);
            } else if (failure instanceof AmqpException
                && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) {
                // Broker-side timeout while accepting a session; retried after the same back-off.
                return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION);
            } else if (failure instanceof AmqpException && ((AmqpException) failure).isTransient()) {
                // Any other transient AMQP fault is also retried after the back-off.
                return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION);
            } else {
                // Non-retriable failure: propagate the error and stop retrying.
                return Mono.<Long>error(failure);
            }
        })));
}
new AmqpException(true, "Session receive link completed without being active", null)))
/**
 * Gets an active session receive link.
 * <p>
 * If a link was supplied at construction time ({@code this.receiveLink}), it is returned as-is. Otherwise, a new
 * session receive link is created lazily inside {@code Mono.defer} so that every subscription — including each
 * {@code retryWhen} resubscription — provisions a fresh link. The link is only emitted once its endpoint reports
 * {@link AmqpEndpointState#ACTIVE}; waiting for that state is bounded by {@code operationTimeout}.
 *
 * @return A Mono that completes with an active session receive link, or errors if the manager is disposed or a
 *     non-retriable failure occurs.
 */
Mono<ServiceBusReceiveLink> getActiveLink() {
    if (this.receiveLink != null) {
        // A pre-established link was handed to this manager at construction; reuse it.
        return Mono.just(this.receiveLink);
    }
    // defer(...) makes link creation per-subscription, so retryWhen re-creates the link on each attempt.
    return Mono.defer(() -> createSessionReceiveLink()
        .flatMap(link -> link.getEndpointStates()
            .filter(e -> e == AmqpEndpointState.ACTIVE)
            .next()
            // Lazy error supplier: the AmqpException is constructed only if the endpoint state stream
            // completes without ever becoming ACTIVE.
            .switchIfEmpty(Mono.error(() -> new AmqpException(true,
                "Session receive link completed without being active", null)))
            .timeout(operationTimeout)
            .then(Mono.just(link))))
        .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> {
            final Throwable failure = signal.failure();
            LOGGER.atInfo()
                .addKeyValue(ENTITY_PATH_KEY, entityPath)
                .addKeyValue("attempt", signal.totalRetriesInARow())
                .log("Error occurred while getting unnamed session.", failure);
            if (isDisposed.get()) {
                // Manager was shut down while retrying; surface a terminal (non-transient) error.
                return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure,
                    getErrorContext()));
            } else if (failure instanceof TimeoutException) {
                // No session became available in time; sleep before the next accept-session attempt.
                return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION);
            } else if (failure instanceof AmqpException
                && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) {
                // Broker-side timeout while accepting a session; retried after the same back-off.
                return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION);
            } else {
                // NOTE(review): a transient AmqpException whose condition is not TIMEOUT_ERROR lands in this
                // non-retry branch — confirm that is intended.
                // Correlates the two log statements emitted for this failed acquire attempt.
                final long id = System.nanoTime();
                LOGGER.atInfo()
                    .addKeyValue(TRACKING_ID_KEY, id)
                    .log("Unable to acquire new session.", failure);
                // NOTE(review): publishOn(boundedElastic) presumably moves the terminal error signal off the
                // AMQP/reactor thread before downstream operators observe it — confirm intent.
                return Mono.<Long>error(failure)
                    .publishOn(Schedulers.boundedElastic())
                    .doOnError(e -> LOGGER.atInfo()
                        .addKeyValue(TRACKING_ID_KEY, id)
                        .log("Emitting the error signal received for session acquire attempt.", e)
                    );
            }
        })));
}
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. */ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? 
receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. * * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. 
*/ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. 
* * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. */ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final String TRACKING_ID_KEY = "trackingId"; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. 
*/ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. 
* * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. */ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); 
sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. * * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. 
*/ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
Nice! Thank you for clarifying. :)
/**
 * Gets an active session receive link.
 * <p>
 * Returns the construction-time link when one exists; otherwise creates a link per subscription (inside
 * {@code Mono.defer}) and emits it once the endpoint becomes {@link AmqpEndpointState#ACTIVE}, bounded by
 * {@code operationTimeout}. Transient failures are retried after a fixed back-off.
 *
 * @return A Mono that completes with an active session receive link.
 */
Mono<ServiceBusReceiveLink> getActiveLink() {
    if (this.receiveLink != null) {
        return Mono.just(this.receiveLink);
    }

    return Mono.defer(() -> createSessionReceiveLink()
        .flatMap(link -> link.getEndpointStates()
            .filter(state -> state == AmqpEndpointState.ACTIVE)
            .next()
            // Lazily construct the exception: it is only built if the state stream ends without ACTIVE.
            .switchIfEmpty(Mono.error(() -> new AmqpException(true,
                "Session receive link completed without being active", null)))
            .timeout(operationTimeout)
            .then(Mono.just(link))))
        .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(retrySignal -> {
            final Throwable error = retrySignal.failure();
            LOGGER.atInfo()
                .addKeyValue(ENTITY_PATH_KEY, entityPath)
                .addKeyValue("attempt", retrySignal.totalRetriesInARow())
                .log("Error occurred while getting unnamed session.", error);

            // Disposal wins over any retry decision: emit a terminal, non-transient error.
            if (isDisposed.get()) {
                return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", error,
                    getErrorContext()));
            }

            // A local timeout, a broker TIMEOUT_ERROR, or any transient AMQP fault is retried after a sleep.
            final boolean retriable;
            if (error instanceof TimeoutException) {
                retriable = true;
            } else if (error instanceof AmqpException) {
                final AmqpException amqpError = (AmqpException) error;
                retriable = amqpError.getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR
                    || amqpError.isTransient();
            } else {
                retriable = false;
            }

            return retriable
                ? Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION)
                : Mono.<Long>error(error);
        })));
}
new AmqpException(true, "Session receive link completed without being active", null)))
/**
 * Gets an active session receive link.
 * <p>
 * If a link was supplied at construction time ({@code this.receiveLink}), it is returned as-is. Otherwise, a new
 * session receive link is created lazily inside {@code Mono.defer} so that every subscription — including each
 * {@code retryWhen} resubscription — provisions a fresh link. The link is only emitted once its endpoint reports
 * {@link AmqpEndpointState#ACTIVE}; waiting for that state is bounded by {@code operationTimeout}.
 *
 * @return A Mono that completes with an active session receive link, or errors if the manager is disposed or a
 *     non-retriable failure occurs.
 */
Mono<ServiceBusReceiveLink> getActiveLink() {
    if (this.receiveLink != null) {
        // A pre-established link was handed to this manager at construction; reuse it.
        return Mono.just(this.receiveLink);
    }
    // defer(...) makes link creation per-subscription, so retryWhen re-creates the link on each attempt.
    return Mono.defer(() -> createSessionReceiveLink()
        .flatMap(link -> link.getEndpointStates()
            .filter(e -> e == AmqpEndpointState.ACTIVE)
            .next()
            // Lazy error supplier: the AmqpException is constructed only if the endpoint state stream
            // completes without ever becoming ACTIVE.
            .switchIfEmpty(Mono.error(() -> new AmqpException(true,
                "Session receive link completed without being active", null)))
            .timeout(operationTimeout)
            .then(Mono.just(link))))
        .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> {
            final Throwable failure = signal.failure();
            LOGGER.atInfo()
                .addKeyValue(ENTITY_PATH_KEY, entityPath)
                .addKeyValue("attempt", signal.totalRetriesInARow())
                .log("Error occurred while getting unnamed session.", failure);
            if (isDisposed.get()) {
                // Manager was shut down while retrying; surface a terminal (non-transient) error.
                return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure,
                    getErrorContext()));
            } else if (failure instanceof TimeoutException) {
                // No session became available in time; sleep before the next accept-session attempt.
                return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION);
            } else if (failure instanceof AmqpException
                && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) {
                // Broker-side timeout while accepting a session; retried after the same back-off.
                return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION);
            } else {
                // NOTE(review): a transient AmqpException whose condition is not TIMEOUT_ERROR lands in this
                // non-retry branch — confirm that is intended.
                // Correlates the two log statements emitted for this failed acquire attempt.
                final long id = System.nanoTime();
                LOGGER.atInfo()
                    .addKeyValue(TRACKING_ID_KEY, id)
                    .log("Unable to acquire new session.", failure);
                // NOTE(review): publishOn(boundedElastic) presumably moves the terminal error signal off the
                // AMQP/reactor thread before downstream operators observe it — confirm intent.
                return Mono.<Long>error(failure)
                    .publishOn(Schedulers.boundedElastic())
                    .doOnError(e -> LOGGER.atInfo()
                        .addKeyValue(TRACKING_ID_KEY, id)
                        .log("Emitting the error signal received for session acquire attempt.", e)
                    );
            }
        })));
}
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. */ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? 
receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. * * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. 
*/ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. 
* * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. */ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final String TRACKING_ID_KEY = "trackingId"; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. 
*/ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. 
* * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. */ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); 
sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. * * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. 
*/ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
Thanks @anuchandy, I also thought a lot about whether we need to add this transient error check for retrying at the link level. I was referring to [the error handling of ServiceBusReceiveLinkProcessor (SBRLP)](https://github.com/Azure/azure-sdk-for-java/blob/62d29b71ae2a8485dae1f3fa14e40f37fdcc2d4a/sdk/servicebus/azure-messaging-servicebus/src/main/java/com/azure/messaging/servicebus/implementation/ServiceBusReceiveLinkProcessor.java#L340-L357): when it encounters a transient error, SBRLP retries by requesting a new link rather than throwing to a higher level. On the other hand, if we throw the transient error to the SessionProcessor, the processor will [restart the client as a recovery](https://github.com/Azure/azure-sdk-for-java/blob/3ef6d6efc51b692d7e0c5716c4e5a2245b957e3e/sdk/servicebus/azure-messaging-servicebus/src/main/java/com/azure/messaging/servicebus/ServiceBusProcessorClient.java#L406-L410), and I am not sure whether that is too heavy. So I decided to add a transient error check for session link recovery. However, for this specific case, the transient flag on the error is set by us, so adding this transient error check may change the existing behavior and introduce unexpected errors. Maybe I should create another issue to add the transient check and do the validation there. For this PR, I can let the session processor do the recovery, as that has less impact.
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).isTransient()) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { return Mono.<Long>error(failure); } }))); }
&& ((AmqpException) failure).isTransient()) {
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { final long id = System.nanoTime(); LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Unable to acquire new session.", failure); return Mono.<Long>error(failure) .publishOn(Schedulers.boundedElastic()) .doOnError(e -> LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Emitting the error signal received for session acquire attempt.", e) ); } }))); }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. */ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? 
receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. * * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. 
*/ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. 
* * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. */ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final String TRACKING_ID_KEY = "trackingId"; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. 
*/ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. 
* * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. */ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); 
sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. * * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. 
*/ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
Sounds good Kun @liukun-msft. Yes, Agree with you let's create a work item for that. For now, let's have minimal change to fix this endpoint completion case.
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).isTransient()) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { return Mono.<Long>error(failure); } }))); }
&& ((AmqpException) failure).isTransient()) {
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { final long id = System.nanoTime(); LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Unable to acquire new session.", failure); return Mono.<Long>error(failure) .publishOn(Schedulers.boundedElastic()) .doOnError(e -> LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Emitting the error signal received for session acquire attempt.", e) ); } }))); }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. */ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? 
receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. * * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. 
*/ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. 
* * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. */ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final String TRACKING_ID_KEY = "trackingId"; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. 
*/ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. 
* * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. */ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); 
sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. * * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. 
*/ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
I don't have context about the current design choice of propagating the error to the session processor, may be inherited from the legacy library.
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).isTransient()) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { return Mono.<Long>error(failure); } }))); }
&& ((AmqpException) failure).isTransient()) {
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { final long id = System.nanoTime(); LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Unable to acquire new session.", failure); return Mono.<Long>error(failure) .publishOn(Schedulers.boundedElastic()) .doOnError(e -> LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Emitting the error signal received for session acquire attempt.", e) ); } }))); }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. */ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? 
receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. * * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. 
*/ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. 
* * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. */ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final String TRACKING_ID_KEY = "trackingId"; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. 
*/ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. 
* * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. */ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); 
sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. * * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. 
*/ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
But I suspect the reason for the design could be this: the SB session concept allows load balancing of the client-side processing by distributing the messages across sessions. It could be that the typical case is to spread the messages across sessions evenly to maximize such parallelism. So, suppose one session disconnects due to no messages being available or read (i.e., idle-timeout), the service being busy, a service update, or a connection error. In that case, it's likely that other sessions to the same entity are going to be disconnected for the same reason. This common use case might be one reason for the current design choice, which simplifies the recovery by avoiding the need to coordinate all those threads across sessions trying to recover locally.
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).isTransient()) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { return Mono.<Long>error(failure); } }))); }
&& ((AmqpException) failure).isTransient()) {
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { final long id = System.nanoTime(); LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Unable to acquire new session.", failure); return Mono.<Long>error(failure) .publishOn(Schedulers.boundedElastic()) .doOnError(e -> LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Emitting the error signal received for session acquire attempt.", e) ); } }))); }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. */ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? 
receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. * * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. 
*/ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. 
* * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. */ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final String TRACKING_ID_KEY = "trackingId"; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. 
*/ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. 
* * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. */ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); 
sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. * * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. 
*/ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
nit: also, if possible, please update the code comment to something like _While waiting for the link to become ACTIVE, if the broker detaches the link without an error condition, the link-endpoint-state publisher will transition to completion without ever emitting ACTIVE. Map such publisher completion to a transient (i.e., retriable) AmqpException to enable processor recovery._ (+ a changelog entry along similar lines). I think what is important to call out for other devs is that the completion is what causes this; any other non-ACTIVE state doesn't matter.
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).isTransient()) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { return Mono.<Long>error(failure); } }))); }
&& ((AmqpException) failure).isTransient()) {
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { final long id = System.nanoTime(); LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Unable to acquire new session.", failure); return Mono.<Long>error(failure) .publishOn(Schedulers.boundedElastic()) .doOnError(e -> LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Emitting the error signal received for session acquire attempt.", e) ); } }))); }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. */ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? 
receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. * * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. 
*/ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. 
* * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. */ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final String TRACKING_ID_KEY = "trackingId"; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. 
*/ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. 
* * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. */ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); 
sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. * * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. 
*/ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
Sure, I'll update the code comment. I found that if we throw the error to session processor level, because the recover process need to close client, which contains the block calls, and the link-endpoint-state publisher emit the signal on reactor-exector thread, the session processor recovery would fail with an error "block()/... is not allowed on the reactor-exector thread". This `getActiveLink()` is also used for `ServiceBusSessionReceiverAsyncClient` to accept a session, and user could handle if an error throw out. But for the session processor, I suspect when we reused the `getActiveLink()`, we didn't consider this error path and validate it. We have to publish the `Mono.<Long>error(failure)` on a blocking thread so that it can be handled by the session processor.
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).isTransient()) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { return Mono.<Long>error(failure); } }))); }
&& ((AmqpException) failure).isTransient()) {
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { final long id = System.nanoTime(); LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Unable to acquire new session.", failure); return Mono.<Long>error(failure) .publishOn(Schedulers.boundedElastic()) .doOnError(e -> LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Emitting the error signal received for session acquire attempt.", e) ); } }))); }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. */ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? 
receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. * * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. 
*/ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. 
* * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. */ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final String TRACKING_ID_KEY = "trackingId"; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. 
*/ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. 
* * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. */ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); 
sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. * * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. 
*/ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
Hi @anuchandy, I suspect we haven't tested this error path before when using the session processor. Currently, I think publishing the error on the boundedElastic thread may involve fewer side effects — do you have any other ideas for avoiding a processor restart on the non-blocking thread? If we agree to add the `publishOn(Schedulers.boundedElastic())` here, we may also need to add it for [the disposed error](https://github.com/Azure/azure-sdk-for-java/blob/dc387943322d149460803a0f2911fd872d8e45b7/sdk/servicebus/azure-messaging-servicebus/src/main/java/com/azure/messaging/servicebus/ServiceBusSessionManager.java#L296-L298).
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { return Mono.<Long>error(failure).publishOn(Schedulers.boundedElastic()); } }))); }
return Mono.<Long>error(failure).publishOn(Schedulers.boundedElastic());
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { final long id = System.nanoTime(); LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Unable to acquire new session.", failure); return Mono.<Long>error(failure) .publishOn(Schedulers.boundedElastic()) .doOnError(e -> LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Emitting the error signal received for session acquire attempt.", e) ); } }))); }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. */ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? 
receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. * * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. 
*/ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. 
* * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. */ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final String TRACKING_ID_KEY = "trackingId"; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. 
*/ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. 
* * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. */ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); 
sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. * * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. 
*/ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
Hi Kun @liukun-msft, the approach you coded looks good to me. Given that this is a rare corner case and we rely on the bounded thread pool for recovery, it would be helpful to have info logging to detect potential thread starvation in the pool. Something like: emit the first log entry once we hit the error signal, then a second log entry once the error signal is picked up by a bounded-elastic thread. If the receiver hangs with the first log entry but without the second entry from the bounded thread, that indicates thread starvation. Hi Connie @conniey, I'm thinking about the offline chat we had on detecting thread starvation in the recovery routes that rely on the bounded pool; this is one of those routes. Tagging you in case you have some thoughts.
/**
 * Gets an active unnamed session link, retrying while the session manager is open.
 * Timeout-style failures (local {@link TimeoutException} or an AMQP TIMEOUT_ERROR from the
 * broker) are retried after a fixed back-off; all other failures are re-emitted on the
 * bounded-elastic scheduler.
 *
 * @return A Mono that completes when an unnamed session link becomes active, or errors if the
 *     manager is disposed or the failure is not a timeout.
 */
Mono<ServiceBusReceiveLink> getActiveLink() {
    if (this.receiveLink != null) {
        // A named-session manager already holds its link; reuse it.
        return Mono.just(this.receiveLink);
    }
    return Mono.defer(() -> createSessionReceiveLink()
        .flatMap(link -> link.getEndpointStates()
            .filter(e -> e == AmqpEndpointState.ACTIVE)
            .next()
            // A link that completes without ever reaching ACTIVE is surfaced as a retryable error.
            .switchIfEmpty(Mono.error(() -> new AmqpException(true,
                "Session receive link completed without being active", null)))
            .timeout(operationTimeout)
            .then(Mono.just(link))))
        .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> {
            final Throwable failure = signal.failure();
            LOGGER.atInfo()
                .addKeyValue(ENTITY_PATH_KEY, entityPath)
                .addKeyValue("attempt", signal.totalRetriesInARow())
                .log("Error occurred while getting unnamed session.", failure);

            if (isDisposed.get()) {
                return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure,
                    getErrorContext()));
            } else if (failure instanceof TimeoutException) {
                // Local accept-session timeout: back off before the next attempt.
                return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION);
            } else if (failure instanceof AmqpException
                && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) {
                // Broker-side timeout: same back-off as a local timeout.
                return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION);
            } else {
                // Fix: the terminal error signal hops to the bounded-elastic pool before it is
                // emitted. If that pool is starved, the signal is never delivered and the
                // receiver hangs with no diagnostics. Log with a tracking id before scheduling
                // and again when the signal is actually emitted: the first entry without a
                // matching second one indicates thread starvation in the bounded-elastic pool.
                final long id = System.nanoTime();
                LOGGER.atInfo()
                    .addKeyValue("trackingId", id)
                    .log("Unable to acquire new session.", failure);
                return Mono.<Long>error(failure)
                    .publishOn(Schedulers.boundedElastic())
                    .doOnError(e -> LOGGER.atInfo()
                        .addKeyValue("trackingId", id)
                        .log("Emitting the error signal received for session acquire attempt.", e));
            }
        })));
}
return Mono.<Long>error(failure).publishOn(Schedulers.boundedElastic());
/**
 * Gets an active unnamed session link, retrying while the session manager is open.
 * Timeout-style failures (local {@link TimeoutException} or an AMQP TIMEOUT_ERROR from the
 * broker) are retried after a fixed back-off; any other failure is re-emitted on the
 * bounded-elastic scheduler with paired tracking-id logs for starvation detection.
 *
 * @return A Mono that completes when an unnamed session link becomes active, or errors if the
 *     manager is disposed or the failure is not a timeout.
 */
Mono<ServiceBusReceiveLink> getActiveLink() {
    if (this.receiveLink != null) {
        // A named-session manager already holds its link; reuse it.
        return Mono.just(this.receiveLink);
    }
    return Mono.defer(() -> createSessionReceiveLink()
        .flatMap(link -> link.getEndpointStates()
            .filter(e -> e == AmqpEndpointState.ACTIVE)
            .next()
            // A link that completes without ever reaching ACTIVE is surfaced as a retryable error.
            .switchIfEmpty(Mono.error(() -> new AmqpException(true,
                "Session receive link completed without being active", null)))
            .timeout(operationTimeout)
            .then(Mono.just(link))))
        .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> {
            final Throwable failure = signal.failure();
            LOGGER.atInfo()
                .addKeyValue(ENTITY_PATH_KEY, entityPath)
                .addKeyValue("attempt", signal.totalRetriesInARow())
                .log("Error occurred while getting unnamed session.", failure);

            if (isDisposed.get()) {
                return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure,
                    getErrorContext()));
            } else if (failure instanceof TimeoutException) {
                // Local accept-session timeout: back off before the next attempt.
                return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION);
            } else if (failure instanceof AmqpException
                && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) {
                // Broker-side timeout: same back-off as a local timeout.
                return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION);
            } else {
                // Non-retryable failure: the error signal hops to boundedElastic before being
                // emitted — presumably to move it off the delivering thread (TODO confirm).
                // The paired trackingId logs detect starvation of the bounded-elastic pool:
                // the first entry without the second means the signal was never picked up.
                final long id = System.nanoTime();
                LOGGER.atInfo()
                    .addKeyValue(TRACKING_ID_KEY, id)
                    .log("Unable to acquire new session.", failure);
                return Mono.<Long>error(failure)
                    .publishOn(Schedulers.boundedElastic())
                    .doOnError(e -> LOGGER.atInfo()
                        .addKeyValue(TRACKING_ID_KEY, id)
                        .log("Emitting the error signal received for session acquire attempt.", e)
                    );
            }
        })));
}
/**
 * Manages session-aware receive links for a Service Bus entity: tracks one
 * {@link ServiceBusSessionReceiver} per session id, rolls over to new unnamed sessions as
 * receivers complete, and funnels messages from all sessions into a single merged Flux.
 * Closing the manager closes every open session receiver and disposes its schedulers.
 */
class ServiceBusSessionManager implements AutoCloseable {
    // Back-off applied when accepting a session fails with a local or broker-side timeout.
    private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1);
    private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class);

    private final String entityPath;
    private final MessagingEntityType entityType;
    private final ReceiverOptions receiverOptions;
    // Pre-established link for a named-session receiver; null when sessions are acquired on demand.
    private final ServiceBusReceiveLink receiveLink;
    private final ServiceBusConnectionProcessor connectionProcessor;
    private final Duration operationTimeout;
    private final MessageSerializer messageSerializer;
    private final String identifier;
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    private final AtomicBoolean isStarted = new AtomicBoolean();
    // One dedicated scheduler per concurrent session; idle ones are parked in availableSchedulers.
    private final List<Scheduler> schedulers;
    private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>();
    private final Duration maxSessionLockRenewDuration;

    /**
     * SessionId to receiver mapping.
     */
    private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>();
    private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor;
    private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink;
    private volatile Flux<ServiceBusMessageContext> receiveFlux;

    ServiceBusSessionManager(String entityPath, MessagingEntityType entityType,
        ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer,
        ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) {
        this.entityPath = entityPath;
        this.entityType = entityType;
        this.receiverOptions = receiverOptions;
        this.connectionProcessor = connectionProcessor;
        this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout();
        this.messageSerializer = messageSerializer;
        this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration();
        this.identifier = identifier;

        // Rolling receivers process several sessions concurrently; otherwise one scheduler suffices.
        final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver()
            ? receiverOptions.getMaxConcurrentSessions()
            : 1;
        final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers)
            .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE,
                DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index))
            .collect(Collectors.toList());
        this.schedulers = Collections.unmodifiableList(schedulerList);
        this.availableSchedulers.addAll(this.schedulers);

        this.processor = EmitterProcessor.create(numberOfSchedulers, false);
        this.sessionReceiveSink = processor.sink();
        this.receiveLink = receiveLink;
    }

    ServiceBusSessionManager(String entityPath, MessagingEntityType entityType,
        ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer,
        ReceiverOptions receiverOptions, String identifier) {
        this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier);
    }

    /**
     * Gets the link name with the matching {@code sessionId}.
     *
     * @param sessionId Session id to get link name for.
     *
     * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}.
     */
    String getLinkName(String sessionId) {
        final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId);
        return receiver != null ? receiver.getLinkName() : null;
    }

    /**
     * Gets the identifier of the instance of {@link ServiceBusSessionManager}.
     *
     * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}.
     */
    public String getIdentifier() {
        return this.identifier;
    }

    /**
     * Gets the state of a session given its identifier.
     *
     * @param sessionId Identifier of session to get.
     *
     * @return The session state or an empty Mono if there is no state set for the session.
     * @throws IllegalStateException if the receiver is a non-session receiver.
     */
    Mono<byte[]> getSessionState(String sessionId) {
        return validateParameter(sessionId, "sessionId", "getSessionState").then(
            getManagementNode().flatMap(channel -> {
                final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId);
                // Pass the open link's name (if any) so the service can associate the call with it.
                final String associatedLinkName = receiver != null ? receiver.getLinkName() : null;
                return channel.getSessionState(sessionId, associatedLinkName);
            }));
    }

    /**
     * Gets a stream of messages from different sessions.
     *
     * @return A Flux of messages merged from different sessions.
     */
    Flux<ServiceBusMessageContext> receive() {
        // First caller wires up the pipeline; subsequent callers share the same Flux.
        if (!isStarted.getAndSet(true)) {
            this.sessionReceiveSink.onRequest(this::onSessionRequest);

            if (!receiverOptions.isRollingSessionReceiver()) {
                receiveFlux = getSession(schedulers.get(0), false);
            } else {
                receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions());
            }
        }
        return receiveFlux;
    }

    /**
     * Renews the session lock.
     *
     * @param sessionId Identifier of session to get.
     *
     * @return The next expiration time for the session lock.
     * @throws IllegalStateException if the receiver is a non-session receiver.
     */
    Mono<OffsetDateTime> renewSessionLock(String sessionId) {
        return validateParameter(sessionId, "sessionId", "renewSessionLock").then(
            getManagementNode().flatMap(channel -> {
                final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId);
                final String associatedLinkName = receiver != null ? receiver.getLinkName() : null;
                return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> {
                    if (receiver != null) {
                        // Keep the in-memory lock expiry in step with the broker's answer.
                        receiver.setSessionLockedUntil(offsetDateTime);
                    }
                    sink.next(offsetDateTime);
                });
            }));
    }

    /**
     * Tries to update the message disposition on a session aware receive link.
     *
     * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means
     *     there isn't an open link with that {@code sessionId}.
     */
    Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus,
        Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription,
        ServiceBusTransactionContext transactionContext) {
        final String operation = "updateDisposition";
        return Mono.when(
            validateParameter(lockToken, "lockToken", operation),
            // NOTE(review): lockToken is validated twice here — looks like a copy/paste slip;
            // confirm the intent before changing.
            validateParameter(lockToken, "lockToken", operation),
            validateParameter(sessionId, "'sessionId'", operation)).then(
            Mono.defer(() -> {
                final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId);
                if (receiver == null || !receiver.containsLockToken(lockToken)) {
                    return Mono.just(false);
                }
                final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason,
                    deadLetterDescription, propertiesToModify, transactionContext);
                return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true);
            }));
    }

    @Override
    public void close() {
        if (isDisposed.getAndSet(true)) {
            return;
        }

        // Close every open session receiver (bounded by the operation timeout), then tear down
        // the shared sink and schedulers.
        final List<Mono<Void>> closeables = sessionReceivers.values().stream()
            .map(receiver -> receiver.closeAsync())
            .collect(Collectors.toList());

        Mono.when(closeables).block(operationTimeout);

        sessionReceiveSink.complete();

        for (Scheduler scheduler : schedulers) {
            scheduler.dispose();
        }
    }

    private AmqpErrorContext getErrorContext() {
        return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath);
    }

    /**
     * Creates an session receive link.
     *
     * @return A Mono that completes with an session receive link.
     */
    private Mono<ServiceBusReceiveLink> createSessionReceiveLink() {
        final String sessionId = receiverOptions.getSessionId();
        // Named sessions reuse the session id as the link name; unnamed sessions get a random one.
        final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-");
        return connectionProcessor
            .flatMap(connection -> {
                return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                    null, entityType, identifier, sessionId);
            });
    }

    /**
     * Gets the next available unnamed session with the given receive options and publishes its contents on the given
     * {@code scheduler}.
     *
     * @param scheduler Scheduler to coordinate received methods on.
     * @param disposeOnIdle true to dispose receiver when it idles; false otherwise.
     * @return A Mono that completes with an unnamed session receiver.
     */
    private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) {
        return getActiveLink().flatMap(link -> link.getSessionId()
            .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> {
                if (existing != null) {
                    return existing;
                }
                return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(),
                    receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock,
                    maxSessionLockRenewDuration);
            })))
            .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> {
                LOGGER.atVerbose()
                    .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId())
                    .log("Closing session receiver.");
                // Return the scheduler to the pool and drop the receiver before asking for the next session.
                availableSchedulers.push(scheduler);
                sessionReceivers.remove(sessionReceiver.getSessionId());
                sessionReceiver.closeAsync().subscribe();
                if (receiverOptions.isRollingSessionReceiver()) {
                    onSessionRequest(1L);
                }
            }));
    }

    private Mono<ServiceBusManagementNode> getManagementNode() {
        return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType));
    }

    /**
     * Emits a new unnamed active session when it becomes available.
     *
     * @param request Number of unnamed active sessions to emit.
     */
    private void onSessionRequest(long request) {
        if (isDisposed.get()) {
            LOGGER.info("Session manager is disposed. Not emitting more unnamed sessions.");
            return;
        }

        LOGGER.atVerbose()
            .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request)
            .log("Requested unnamed sessions.");

        for (int i = 0; i < request; i++) {
            final Scheduler scheduler = availableSchedulers.poll();
            if (scheduler == null) {
                // Long.MAX_VALUE means unbounded demand; running out of schedulers then is expected.
                if (request != Long.MAX_VALUE) {
                    LOGGER.atVerbose()
                        .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request)
                        .log("There are no available schedulers to fetch.");
                }
                return;
            }

            Flux<ServiceBusMessageContext> session = getSession(scheduler, true);
            sessionReceiveSink.next(session);
        }
    }

    // Validates common preconditions: manager not disposed, argument non-null and non-empty.
    private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation)));
        } else if (parameter == null) {
            return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName)));
        } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) {
            return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.",
                parameterName)));
        } else {
            return Mono.empty();
        }
    }
}
/**
 * Manages session-aware receive links for a Service Bus entity: tracks one
 * {@link ServiceBusSessionReceiver} per session id, rolls over to new unnamed sessions as
 * receivers complete, and funnels messages from all sessions into a single merged Flux.
 * Closing the manager closes every open session receiver and disposes its schedulers.
 */
class ServiceBusSessionManager implements AutoCloseable {
    // Back-off applied when accepting a session fails with a local or broker-side timeout.
    private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1);
    // Log key correlating the pre-publishOn and post-publishOn entries used to detect
    // bounded-elastic thread starvation in getActiveLink().
    private static final String TRACKING_ID_KEY = "trackingId";
    private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class);

    private final String entityPath;
    private final MessagingEntityType entityType;
    private final ReceiverOptions receiverOptions;
    // Pre-established link for a named-session receiver; null when sessions are acquired on demand.
    private final ServiceBusReceiveLink receiveLink;
    private final ServiceBusConnectionProcessor connectionProcessor;
    private final Duration operationTimeout;
    private final MessageSerializer messageSerializer;
    private final String identifier;
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    private final AtomicBoolean isStarted = new AtomicBoolean();
    // One dedicated scheduler per concurrent session; idle ones are parked in availableSchedulers.
    private final List<Scheduler> schedulers;
    private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>();
    private final Duration maxSessionLockRenewDuration;

    /**
     * SessionId to receiver mapping.
     */
    private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>();
    private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor;
    private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink;
    private volatile Flux<ServiceBusMessageContext> receiveFlux;

    ServiceBusSessionManager(String entityPath, MessagingEntityType entityType,
        ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer,
        ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) {
        this.entityPath = entityPath;
        this.entityType = entityType;
        this.receiverOptions = receiverOptions;
        this.connectionProcessor = connectionProcessor;
        this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout();
        this.messageSerializer = messageSerializer;
        this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration();
        this.identifier = identifier;

        // Rolling receivers process several sessions concurrently; otherwise one scheduler suffices.
        final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver()
            ? receiverOptions.getMaxConcurrentSessions()
            : 1;
        final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers)
            .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE,
                DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index))
            .collect(Collectors.toList());
        this.schedulers = Collections.unmodifiableList(schedulerList);
        this.availableSchedulers.addAll(this.schedulers);

        this.processor = EmitterProcessor.create(numberOfSchedulers, false);
        this.sessionReceiveSink = processor.sink();
        this.receiveLink = receiveLink;
    }

    ServiceBusSessionManager(String entityPath, MessagingEntityType entityType,
        ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer,
        ReceiverOptions receiverOptions, String identifier) {
        this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier);
    }

    /**
     * Gets the link name with the matching {@code sessionId}.
     *
     * @param sessionId Session id to get link name for.
     *
     * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}.
     */
    String getLinkName(String sessionId) {
        final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId);
        return receiver != null ? receiver.getLinkName() : null;
    }

    /**
     * Gets the identifier of the instance of {@link ServiceBusSessionManager}.
     *
     * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}.
     */
    public String getIdentifier() {
        return this.identifier;
    }

    /**
     * Gets the state of a session given its identifier.
     *
     * @param sessionId Identifier of session to get.
     *
     * @return The session state or an empty Mono if there is no state set for the session.
     * @throws IllegalStateException if the receiver is a non-session receiver.
     */
    Mono<byte[]> getSessionState(String sessionId) {
        return validateParameter(sessionId, "sessionId", "getSessionState").then(
            getManagementNode().flatMap(channel -> {
                final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId);
                // Pass the open link's name (if any) so the service can associate the call with it.
                final String associatedLinkName = receiver != null ? receiver.getLinkName() : null;
                return channel.getSessionState(sessionId, associatedLinkName);
            }));
    }

    /**
     * Gets a stream of messages from different sessions.
     *
     * @return A Flux of messages merged from different sessions.
     */
    Flux<ServiceBusMessageContext> receive() {
        // First caller wires up the pipeline; subsequent callers share the same Flux.
        if (!isStarted.getAndSet(true)) {
            this.sessionReceiveSink.onRequest(this::onSessionRequest);

            if (!receiverOptions.isRollingSessionReceiver()) {
                receiveFlux = getSession(schedulers.get(0), false);
            } else {
                receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions());
            }
        }
        return receiveFlux;
    }

    /**
     * Renews the session lock.
     *
     * @param sessionId Identifier of session to get.
     *
     * @return The next expiration time for the session lock.
     * @throws IllegalStateException if the receiver is a non-session receiver.
     */
    Mono<OffsetDateTime> renewSessionLock(String sessionId) {
        return validateParameter(sessionId, "sessionId", "renewSessionLock").then(
            getManagementNode().flatMap(channel -> {
                final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId);
                final String associatedLinkName = receiver != null ? receiver.getLinkName() : null;
                return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> {
                    if (receiver != null) {
                        // Keep the in-memory lock expiry in step with the broker's answer.
                        receiver.setSessionLockedUntil(offsetDateTime);
                    }
                    sink.next(offsetDateTime);
                });
            }));
    }

    /**
     * Tries to update the message disposition on a session aware receive link.
     *
     * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means
     *     there isn't an open link with that {@code sessionId}.
     */
    Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus,
        Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription,
        ServiceBusTransactionContext transactionContext) {
        final String operation = "updateDisposition";
        return Mono.when(
            validateParameter(lockToken, "lockToken", operation),
            // NOTE(review): lockToken is validated twice here — looks like a copy/paste slip;
            // confirm the intent before changing.
            validateParameter(lockToken, "lockToken", operation),
            validateParameter(sessionId, "'sessionId'", operation)).then(
            Mono.defer(() -> {
                final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId);
                if (receiver == null || !receiver.containsLockToken(lockToken)) {
                    return Mono.just(false);
                }
                final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason,
                    deadLetterDescription, propertiesToModify, transactionContext);
                return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true);
            }));
    }

    @Override
    public void close() {
        if (isDisposed.getAndSet(true)) {
            return;
        }

        // Close every open session receiver (bounded by the operation timeout), then tear down
        // the shared sink and schedulers.
        final List<Mono<Void>> closeables = sessionReceivers.values().stream()
            .map(receiver -> receiver.closeAsync())
            .collect(Collectors.toList());

        Mono.when(closeables).block(operationTimeout);

        sessionReceiveSink.complete();

        for (Scheduler scheduler : schedulers) {
            scheduler.dispose();
        }
    }

    private AmqpErrorContext getErrorContext() {
        return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath);
    }

    /**
     * Creates an session receive link.
     *
     * @return A Mono that completes with an session receive link.
     */
    private Mono<ServiceBusReceiveLink> createSessionReceiveLink() {
        final String sessionId = receiverOptions.getSessionId();
        // Named sessions reuse the session id as the link name; unnamed sessions get a random one.
        final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-");
        return connectionProcessor
            .flatMap(connection -> {
                return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                    null, entityType, identifier, sessionId);
            });
    }

    /**
     * Gets the next available unnamed session with the given receive options and publishes its contents on the given
     * {@code scheduler}.
     *
     * @param scheduler Scheduler to coordinate received methods on.
     * @param disposeOnIdle true to dispose receiver when it idles; false otherwise.
     * @return A Mono that completes with an unnamed session receiver.
     */
    private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) {
        return getActiveLink().flatMap(link -> link.getSessionId()
            .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> {
                if (existing != null) {
                    return existing;
                }
                return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(),
                    receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock,
                    maxSessionLockRenewDuration);
            })))
            .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> {
                LOGGER.atVerbose()
                    .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId())
                    .log("Closing session receiver.");
                // Return the scheduler to the pool and drop the receiver before asking for the next session.
                availableSchedulers.push(scheduler);
                sessionReceivers.remove(sessionReceiver.getSessionId());
                sessionReceiver.closeAsync().subscribe();
                if (receiverOptions.isRollingSessionReceiver()) {
                    onSessionRequest(1L);
                }
            }));
    }

    private Mono<ServiceBusManagementNode> getManagementNode() {
        return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType));
    }

    /**
     * Emits a new unnamed active session when it becomes available.
     *
     * @param request Number of unnamed active sessions to emit.
     */
    private void onSessionRequest(long request) {
        if (isDisposed.get()) {
            LOGGER.info("Session manager is disposed. Not emitting more unnamed sessions.");
            return;
        }

        LOGGER.atVerbose()
            .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request)
            .log("Requested unnamed sessions.");

        for (int i = 0; i < request; i++) {
            final Scheduler scheduler = availableSchedulers.poll();
            if (scheduler == null) {
                // Long.MAX_VALUE means unbounded demand; running out of schedulers then is expected.
                if (request != Long.MAX_VALUE) {
                    LOGGER.atVerbose()
                        .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request)
                        .log("There are no available schedulers to fetch.");
                }
                return;
            }

            Flux<ServiceBusMessageContext> session = getSession(scheduler, true);
            sessionReceiveSink.next(session);
        }
    }

    // Validates common preconditions: manager not disposed, argument non-null and non-empty.
    private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation)));
        } else if (parameter == null) {
            return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName)));
        } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) {
            return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.",
                parameterName)));
        } else {
            return Mono.empty();
        }
    }
}
Sure, I'll add the info logs for tracking purposes. Do we have a standard log format for tracking the starvation issue? It would help us search for and remove the logs once we resolve this problem in the future. If not, we can take this as a start.
/**
 * Gets an active unnamed session link, retrying while the session manager is open.
 * Timeout-style failures (a local {@link TimeoutException} or an AMQP TIMEOUT_ERROR from the
 * broker) are retried after a fixed back-off; any other failure is re-emitted on the
 * bounded-elastic scheduler.
 *
 * @return A Mono that completes when an unnamed session link becomes active, or errors if the
 *     manager is disposed or the failure is not a timeout.
 */
Mono<ServiceBusReceiveLink> getActiveLink() {
    // Named-session managers hold a pre-created link; hand it straight back.
    final ServiceBusReceiveLink existing = this.receiveLink;
    if (existing != null) {
        return Mono.just(existing);
    }

    final Mono<ServiceBusReceiveLink> activeLink = Mono.defer(() -> createSessionReceiveLink()
        .flatMap(link -> link.getEndpointStates()
            .filter(state -> state == AmqpEndpointState.ACTIVE)
            .next()
            // A link that completes without ever reaching ACTIVE is a retryable error.
            .switchIfEmpty(Mono.error(() -> new AmqpException(true,
                "Session receive link completed without being active", null)))
            .timeout(operationTimeout)
            .then(Mono.just(link))));

    return activeLink.retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(retrySignal -> {
        final Throwable error = retrySignal.failure();
        LOGGER.atInfo()
            .addKeyValue(ENTITY_PATH_KEY, entityPath)
            .addKeyValue("attempt", retrySignal.totalRetriesInARow())
            .log("Error occurred while getting unnamed session.", error);

        if (isDisposed.get()) {
            return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", error,
                getErrorContext()));
        }

        final boolean amqpTimeout = error instanceof AmqpException
            && ((AmqpException) error).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR;
        if (error instanceof TimeoutException || amqpTimeout) {
            // Both local and broker-side timeouts back off before the next accept attempt.
            return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION);
        }

        return Mono.<Long>error(error).publishOn(Schedulers.boundedElastic());
    })));
}
return Mono.<Long>error(failure).publishOn(Schedulers.boundedElastic());
/**
 * Gets an active unnamed session link, retrying while the session manager is open.
 * Timeout-style failures (local {@link TimeoutException} or an AMQP TIMEOUT_ERROR from the
 * broker) are retried after a fixed back-off; any other failure is re-emitted on the
 * bounded-elastic scheduler with paired tracking-id logs for starvation detection.
 *
 * @return A Mono that completes when an unnamed session link becomes active, or errors if the
 *     manager is disposed or the failure is not a timeout.
 */
Mono<ServiceBusReceiveLink> getActiveLink() {
    if (this.receiveLink != null) {
        // A named-session manager already holds its link; reuse it.
        return Mono.just(this.receiveLink);
    }
    return Mono.defer(() -> createSessionReceiveLink()
        .flatMap(link -> link.getEndpointStates()
            .filter(e -> e == AmqpEndpointState.ACTIVE)
            .next()
            // A link that completes without ever reaching ACTIVE is surfaced as a retryable error.
            .switchIfEmpty(Mono.error(() -> new AmqpException(true,
                "Session receive link completed without being active", null)))
            .timeout(operationTimeout)
            .then(Mono.just(link))))
        .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> {
            final Throwable failure = signal.failure();
            LOGGER.atInfo()
                .addKeyValue(ENTITY_PATH_KEY, entityPath)
                .addKeyValue("attempt", signal.totalRetriesInARow())
                .log("Error occurred while getting unnamed session.", failure);

            if (isDisposed.get()) {
                return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure,
                    getErrorContext()));
            } else if (failure instanceof TimeoutException) {
                // Local accept-session timeout: back off before the next attempt.
                return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION);
            } else if (failure instanceof AmqpException
                && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) {
                // Broker-side timeout: same back-off as a local timeout.
                return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION);
            } else {
                // Non-retryable failure: the error signal hops to boundedElastic before being
                // emitted — presumably to move it off the delivering thread (TODO confirm).
                // The paired trackingId logs detect starvation of the bounded-elastic pool:
                // the first entry without the second means the signal was never picked up.
                final long id = System.nanoTime();
                LOGGER.atInfo()
                    .addKeyValue(TRACKING_ID_KEY, id)
                    .log("Unable to acquire new session.", failure);
                return Mono.<Long>error(failure)
                    .publishOn(Schedulers.boundedElastic())
                    .doOnError(e -> LOGGER.atInfo()
                        .addKeyValue(TRACKING_ID_KEY, id)
                        .log("Emitting the error signal received for session acquire attempt.", e)
                    );
            }
        })));
}
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. */ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? 
receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. * * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. 
*/ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. 
* * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. */ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final String TRACKING_ID_KEY = "trackingId"; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. 
*/ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. 
* * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. */ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); 
sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. * * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. 
*/ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
Hmm... I think one standard log format might not be achievable for all the cases. Maybe we can just include the keywords "bounded-elastic thread" for searching purposes. I simply added one log here; we can enhance it if we have more thoughts.
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { return Mono.<Long>error(failure).publishOn(Schedulers.boundedElastic()); } }))); }
return Mono.<Long>error(failure).publishOn(Schedulers.boundedElastic());
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { final long id = System.nanoTime(); LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Unable to acquire new session.", failure); return Mono.<Long>error(failure) .publishOn(Schedulers.boundedElastic()) .doOnError(e -> LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Emitting the error signal received for session acquire attempt.", e) ); } }))); }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. */ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? 
receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. * * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. 
*/ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. 
* * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. */ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final String TRACKING_ID_KEY = "trackingId"; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. 
*/ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. 
* * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. */ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); 
sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. * * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. 
*/ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
We don't have any standard defined; as you mentioned, it's case by case. For this specific case, I was thinking something like (please review/consider if there is a better format) ```java final String id = "trackingId:" + System.nanoTime(); logger.info("Unable to acquire new session. {}", id, error); Mono.<Long>error(failure).publishOn(Schedulers.boundedElastic()).doOnError(e -> { logger.info("Emitting the error signal received for session acquire attempt. {}", id, error); }); ```
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { return Mono.<Long>error(failure).publishOn(Schedulers.boundedElastic()); } }))); }
return Mono.<Long>error(failure).publishOn(Schedulers.boundedElastic());
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { final long id = System.nanoTime(); LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Unable to acquire new session.", failure); return Mono.<Long>error(failure) .publishOn(Schedulers.boundedElastic()) .doOnError(e -> LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Emitting the error signal received for session acquire attempt.", e) ); } }))); }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. */ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? 
receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. * * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. 
*/ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. 
* * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. */ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final String TRACKING_ID_KEY = "trackingId"; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. 
*/ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. 
* * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. */ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); 
sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. * * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. 
*/ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
And the logger should take care of printing the thread name/id
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { return Mono.<Long>error(failure).publishOn(Schedulers.boundedElastic()); } }))); }
return Mono.<Long>error(failure).publishOn(Schedulers.boundedElastic());
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { final long id = System.nanoTime(); LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Unable to acquire new session.", failure); return Mono.<Long>error(failure) .publishOn(Schedulers.boundedElastic()) .doOnError(e -> LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Emitting the error signal received for session acquire attempt.", e) ); } }))); }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. */ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? 
receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. * * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. 
*/ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. 
* * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. */ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final String TRACKING_ID_KEY = "trackingId"; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. 
*/ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. 
* * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. */ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); 
sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. * * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. 
*/ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
Yes, add a tracking id in log is better if we need to handle multiple sessions 👍
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { return Mono.<Long>error(failure).publishOn(Schedulers.boundedElastic()); } }))); }
return Mono.<Long>error(failure).publishOn(Schedulers.boundedElastic());
Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .filter(e -> e == AmqpEndpointState.ACTIVE) .next() .switchIfEmpty(Mono.error(() -> new AmqpException(true, "Session receive link completed without being active", null))) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); LOGGER.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue("attempt", signal.totalRetriesInARow()) .log("Error occurred while getting unnamed session.", failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { final long id = System.nanoTime(); LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Unable to acquire new session.", failure); return Mono.<Long>error(failure) .publishOn(Schedulers.boundedElastic()) .doOnError(e -> LOGGER.atInfo() .addKeyValue(TRACKING_ID_KEY, id) .log("Emitting the error signal received for session acquire attempt.", e) ); } }))); }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. */ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? 
receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. * * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. 
*/ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. 
* * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. */ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private static final String TRACKING_ID_KEY = "trackingId"; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final MessageSerializer messageSerializer; private final String identifier; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. 
*/ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink, String identifier) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); this.identifier = identifier; final int numberOfSchedulers = receiverOptions.isRollingSessionReceiver() ? receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, String identifier) { this(entityPath, entityType, connectionProcessor, messageSerializer, receiverOptions, null, identifier); } /** * Gets the link name with the matching {@code sessionId}. 
* * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the identifier of the instance of {@link ServiceBusSessionManager}. * * @return The identifier that can identify the instance of {@link ServiceBusSessionManager}. */ public String getIdentifier() { return this.identifier; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. */ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } final List<Mono<Void>> closeables = sessionReceivers.values().stream() .map(receiver -> receiver.closeAsync()) .collect(Collectors.toList()); Mono.when(closeables).block(operationTimeout); 
sessionReceiveSink.complete(); for (Scheduler scheduler : schedulers) { scheduler.dispose(); } } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, sessionId); }); } /** * Gets an active unnamed session link. * * @return A Mono that completes when an unnamed session becomes available. * @throws AmqpException if the session manager is already disposed. */ /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. 
*/ private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { LOGGER.atVerbose() .addKeyValue(SESSION_ID_KEY, sessionReceiver.getSessionId()) .log("Closing session receiver."); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.closeAsync().subscribe(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })); } private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { LOGGER.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("Requested unnamed sessions."); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { LOGGER.atVerbose() .addKeyValue(NUMBER_OF_REQUESTED_MESSAGES_KEY, request) .log("There are no available schedulers to fetch."); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(LOGGER, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(LOGGER, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
`for (String notAction : permission.dataActions())` -> `for (String dataAction : permission.dataActions())`
public static String getSecondaryServicePrincipalClientID(String envSecondaryServicePrincipal) throws IOException { String content = new String(Files.readAllBytes(new File(envSecondaryServicePrincipal).toPath()), StandardCharsets.UTF_8).trim(); HashMap<String, String> auth = new HashMap<>(); if (content.startsWith("{")) { auth = new JacksonAdapter().deserialize(content, auth.getClass(), SerializerEncoding.JSON); return auth.get("clientId"); } else { Properties authSettings = new Properties(); try (FileInputStream credentialsFileStream = new FileInputStream(new File(envSecondaryServicePrincipal))) { authSettings.load(credentialsFileStream); } return authSettings.getProperty("client"); } } /** * Retrieve the secondary service principal secret. * * @param envSecondaryServicePrincipal an Azure Container Registry * @return a service principal secret * @throws IOException exception */ public static String getSecondaryServicePrincipalSecret(String envSecondaryServicePrincipal) throws IOException { String content = new String(Files.readAllBytes(new File(envSecondaryServicePrincipal).toPath()), StandardCharsets.UTF_8).trim(); HashMap<String, String> auth = new HashMap<>(); if (content.startsWith("{")) { auth = new JacksonAdapter().deserialize(content, auth.getClass(), SerializerEncoding.JSON); return auth.get("clientSecret"); } else { Properties authSettings = new Properties(); try (FileInputStream credentialsFileStream = new FileInputStream(new File(envSecondaryServicePrincipal))) { authSettings.load(credentialsFileStream); } return authSettings.getProperty("key"); } } /** * This method creates a certificate for given password. 
* * @param certPath location of certificate file * @param pfxPath location of pfx file * @param alias User alias * @param password alias password * @param cnName domain name * @param dnsName dns name in subject alternate name * @throws IOException IO Exception */ public static void createCertificate(String certPath, String pfxPath, String alias, String password, String cnName, String dnsName) throws IOException { if (new File(pfxPath).exists()) { return; } String validityInDays = "3650"; String keyAlg = "RSA"; String sigAlg = "SHA1withRSA"; String keySize = "2048"; String storeType = "pkcs12"; String command = "keytool"; String jdkPath = System.getProperty("java.home"); if (jdkPath != null && !jdkPath.isEmpty()) { jdkPath = jdkPath.concat("\\bin"); if (new File(jdkPath).isDirectory()) { command = String.format("%s%s%s", jdkPath, File.separator, command); } } else { return; } String[] commandArgs = {command, "-genkey", "-alias", alias, "-keystore", pfxPath, "-storepass", password, "-validity", validityInDays, "-keyalg", keyAlg, "-sigalg", sigAlg, "-keysize", keySize, "-storetype", storeType, "-dname", "CN=" + cnName, "-ext", "EKU=1.3.6.1.5.5.7.3.1"}; if (dnsName != null) { List<String> args = new ArrayList<>(Arrays.asList(commandArgs)); args.add("-ext"); args.add("san=dns:" + dnsName); commandArgs = args.toArray(new String[0]); } Utils.cmdInvocation(commandArgs, true); File pfxFile = new File(pfxPath); if (pfxFile.exists()) { String[] certCommandArgs = {command, "-export", "-alias", alias, "-storetype", storeType, "-keystore", pfxPath, "-storepass", password, "-rfc", "-file", certPath}; Utils.cmdInvocation(certCommandArgs, true); File cerFile = new File(pfxPath); if (!cerFile.exists()) { throw new IOException( "Error occurred while creating certificate" + String.join(" ", certCommandArgs)); } } else { throw new IOException("Error occurred while creating certificates" + String.join(" ", commandArgs)); } } /** * This method is used for invoking native commands. 
* * @param command :- command to invoke. * @param ignoreErrorStream : Boolean which controls whether to throw exception or not * based on error stream. * @return result :- depending on the method invocation. * @throws IOException exceptions thrown from the execution */ public static String cmdInvocation(String[] command, boolean ignoreErrorStream) throws IOException { String result = ""; String error = ""; Process process = new ProcessBuilder(command).start(); try ( InputStream inputStream = process.getInputStream(); InputStream errorStream = process.getErrorStream(); BufferedReader br = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8)); BufferedReader ebr = new BufferedReader(new InputStreamReader(errorStream, StandardCharsets.UTF_8)); ) { result = br.readLine(); process.waitFor(); error = ebr.readLine(); if (error != null && (!"".equals(error))) { if (!ignoreErrorStream) { throw new IOException(error, null); } } } catch (Exception e) { throw new RuntimeException("Exception occurred while invoking command", e); } return result; } /** * Prints information for passed SQL Server. * * @param sqlServer sqlServer to be printed */ public static void print(SqlServer sqlServer) { StringBuilder builder = new StringBuilder().append("Sql Server: ").append(sqlServer.id()) .append("Name: ").append(sqlServer.name()) .append("\n\tResource group: ").append(sqlServer.resourceGroupName()) .append("\n\tRegion: ").append(sqlServer.region()) .append("\n\tSqlServer version: ").append(sqlServer.version()) .append("\n\tFully qualified name for Sql Server: ").append(sqlServer.fullyQualifiedDomainName()); System.out.println(builder.toString()); } /** * Prints information for the passed SQL Database. 
* * @param database database to be printed */ public static void print(SqlDatabase database) { StringBuilder builder = new StringBuilder().append("Sql Database: ").append(database.id()) .append("Name: ").append(database.name()) .append("\n\tResource group: ").append(database.resourceGroupName()) .append("\n\tRegion: ").append(database.region()) .append("\n\tSqlServer Name: ").append(database.sqlServerName()) .append("\n\tEdition of SQL database: ").append(database.edition()) .append("\n\tCollation of SQL database: ").append(database.collation()) .append("\n\tCreation date of SQL database: ").append(database.creationDate()) .append("\n\tIs data warehouse: ").append(database.isDataWarehouse()) .append("\n\tRequested service objective of SQL database: ").append(database.requestedServiceObjectiveName()) .append("\n\tName of current service objective of SQL database: ").append(database.currentServiceObjectiveName()) .append("\n\tMax size bytes of SQL database: ").append(database.maxSizeBytes()) .append("\n\tDefault secondary location of SQL database: ").append(database.defaultSecondaryLocation()); System.out.println(builder.toString()); } /** * Prints information for the passed firewall rule. * * @param firewallRule firewall rule to be printed. */ public static void print(SqlFirewallRule firewallRule) { StringBuilder builder = new StringBuilder().append("Sql firewall rule: ").append(firewallRule.id()) .append("Name: ").append(firewallRule.name()) .append("\n\tResource group: ").append(firewallRule.resourceGroupName()) .append("\n\tRegion: ").append(firewallRule.region()) .append("\n\tSqlServer Name: ").append(firewallRule.sqlServerName()) .append("\n\tStart IP Address of the firewall rule: ").append(firewallRule.startIpAddress()) .append("\n\tEnd IP Address of the firewall rule: ").append(firewallRule.endIpAddress()); System.out.println(builder.toString()); } /** * Prints information for the passed virtual network rule. 
 *
 * @param virtualNetworkRule virtual network rule to be printed.
 */
public static void print(SqlVirtualNetworkRule virtualNetworkRule) {
    // NOTE(review): the "Name: " label has no "\n\t" prefix (unlike the other labels),
    // so it is printed on the same console line as the id — confirm this is intended.
    StringBuilder builder = new StringBuilder().append("SQL virtual network rule: ").append(virtualNetworkRule.id())
        .append("Name: ").append(virtualNetworkRule.name())
        .append("\n\tResource group: ").append(virtualNetworkRule.resourceGroupName())
        .append("\n\tSqlServer Name: ").append(virtualNetworkRule.sqlServerName())
        .append("\n\tSubnet ID: ").append(virtualNetworkRule.subnetId())
        .append("\n\tState: ").append(virtualNetworkRule.state());
    System.out.println(builder.toString());
}

/**
 * Prints information for the passed SQL subscription usage metric.
 *
 * @param subscriptionUsageMetric metric to be printed.
 */
public static void print(SqlSubscriptionUsageMetric subscriptionUsageMetric) {
    StringBuilder builder = new StringBuilder().append("SQL Subscription Usage Metric: ").append(subscriptionUsageMetric.id())
        .append("Name: ").append(subscriptionUsageMetric.name())
        .append("\n\tDisplay Name: ").append(subscriptionUsageMetric.displayName())
        .append("\n\tCurrent Value: ").append(subscriptionUsageMetric.currentValue())
        .append("\n\tLimit: ").append(subscriptionUsageMetric.limit())
        .append("\n\tUnit: ").append(subscriptionUsageMetric.unit())
        .append("\n\tType: ").append(subscriptionUsageMetric.type());
    System.out.println(builder.toString());
}

/**
 * Prints information for the passed SQL database usage metric.
 *
 * @param dbUsageMetric metric to be printed.
 */
public static void print(SqlDatabaseUsageMetric dbUsageMetric) {
    StringBuilder builder = new StringBuilder().append("SQL Database Usage Metric")
        .append("Name: ").append(dbUsageMetric.name())
        .append("\n\tDisplay Name: ").append(dbUsageMetric.displayName())
        .append("\n\tCurrent Value: ").append(dbUsageMetric.currentValue())
        .append("\n\tLimit: ").append(dbUsageMetric.limit())
        .append("\n\tUnit: ").append(dbUsageMetric.unit());
    System.out.println(builder.toString());
}

/**
 * Prints information for the passed Failover Group.
 *
 * @param failoverGroup the SQL Failover Group to be printed.
 */
public static void print(SqlFailoverGroup failoverGroup) {
    StringBuilder builder = new StringBuilder().append("SQL Failover Group: ").append(failoverGroup.id())
        .append("Name: ").append(failoverGroup.name())
        .append("\n\tResource group: ").append(failoverGroup.resourceGroupName())
        .append("\n\tSqlServer Name: ").append(failoverGroup.sqlServerName())
        .append("\n\tRead-write endpoint policy: ").append(failoverGroup.readWriteEndpointPolicy())
        .append("\n\tData loss grace period: ").append(failoverGroup.readWriteEndpointDataLossGracePeriodMinutes())
        .append("\n\tRead-only endpoint policy: ").append(failoverGroup.readOnlyEndpointPolicy())
        .append("\n\tReplication state: ").append(failoverGroup.replicationState())
        .append("\n\tReplication role: ").append(failoverGroup.replicationRole());
    // One indented sub-entry per partner server.
    builder.append("\n\tPartner Servers: ");
    for (PartnerInfo item : failoverGroup.partnerServers()) {
        builder
            .append("\n\t\tId: ").append(item.id())
            .append("\n\t\tLocation: ").append(item.location())
            .append("\n\t\tReplication role: ").append(item.replicationRole());
    }
    // One indented sub-entry per database id in the group.
    builder.append("\n\tDatabases: ");
    for (String databaseId : failoverGroup.databases()) {
        builder.append("\n\t\tID: ").append(databaseId);
    }
    System.out.println(builder.toString());
}

/**
 * Prints information for the passed SQL server key.
 *
 * @param serverKey the SQL server key to be printed.
 */
public static void print(SqlServerKey serverKey) {
    StringBuilder builder = new StringBuilder().append("SQL server key: ").append(serverKey.id())
        .append("Name: ").append(serverKey.name())
        .append("\n\tResource group: ").append(serverKey.resourceGroupName())
        .append("\n\tSqlServer Name: ").append(serverKey.sqlServerName())
        // region and creationDate may be absent; print an empty string in that case
        .append("\n\tRegion: ").append(serverKey.region() != null ? serverKey.region().name() : "")
        .append("\n\tServer Key Type: ").append(serverKey.serverKeyType())
        .append("\n\tServer Key URI: ").append(serverKey.uri())
        .append("\n\tServer Key Thumbprint: ").append(serverKey.thumbprint())
        .append("\n\tServer Key Creation Date: ").append(serverKey.creationDate() != null ? serverKey.creationDate().toString() : "");
    System.out.println(builder.toString());
}

/**
 * Prints information of the elastic pool passed in.
 *
 * @param elasticPool elastic pool to be printed
 */
public static void print(SqlElasticPool elasticPool) {
    StringBuilder builder = new StringBuilder().append("Sql elastic pool: ").append(elasticPool.id())
        .append("Name: ").append(elasticPool.name())
        .append("\n\tResource group: ").append(elasticPool.resourceGroupName())
        .append("\n\tRegion: ").append(elasticPool.region())
        .append("\n\tSqlServer Name: ").append(elasticPool.sqlServerName())
        .append("\n\tEdition of elastic pool: ").append(elasticPool.edition())
        .append("\n\tTotal number of DTUs in the elastic pool: ").append(elasticPool.dtu())
        .append("\n\tMaximum DTUs a database can get in elastic pool: ").append(elasticPool.databaseDtuMax())
        .append("\n\tMinimum DTUs a database is guaranteed in elastic pool: ").append(elasticPool.databaseDtuMin())
        .append("\n\tCreation date for the elastic pool: ").append(elasticPool.creationDate())
        .append("\n\tState of the elastic pool: ").append(elasticPool.state())
        .append("\n\tStorage capacity in MBs for the elastic pool: ").append(elasticPool.storageCapacity());
    System.out.println(builder.toString());
}

/**
 * Prints information of the elastic pool
activity. * * @param elasticPoolActivity elastic pool activity to be printed */ public static void print(ElasticPoolActivity elasticPoolActivity) { StringBuilder builder = new StringBuilder().append("Sql elastic pool activity: ").append(elasticPoolActivity.id()) .append("Name: ").append(elasticPoolActivity.name()) .append("\n\tResource group: ").append(elasticPoolActivity.resourceGroupName()) .append("\n\tState: ").append(elasticPoolActivity.state()) .append("\n\tElastic pool name: ").append(elasticPoolActivity.elasticPoolName()) .append("\n\tStart time of activity: ").append(elasticPoolActivity.startTime()) .append("\n\tEnd time of activity: ").append(elasticPoolActivity.endTime()) .append("\n\tError code of activity: ").append(elasticPoolActivity.errorCode()) .append("\n\tError message of activity: ").append(elasticPoolActivity.errorMessage()) .append("\n\tError severity of activity: ").append(elasticPoolActivity.errorSeverity()) .append("\n\tOperation: ").append(elasticPoolActivity.operation()) .append("\n\tCompleted percentage of activity: ").append(elasticPoolActivity.percentComplete()); System.out.println(builder.toString()); } /** * Print an application gateway. * * @param resource an application gateway */ public static void print(ApplicationGateway resource) { StringBuilder info = new StringBuilder(); info.append("Application gateway: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tSKU: ").append(resource.sku().toString()) .append("\n\tOperational state: ").append(resource.operationalState()) .append("\n\tInternet-facing? ").append(resource.isPublic()) .append("\n\tInternal? 
").append(resource.isPrivate()) .append("\n\tDefault private IP address: ").append(resource.privateIpAddress()) .append("\n\tPrivate IP address allocation method: ").append(resource.privateIpAllocationMethod()) .append("\n\tDisabled SSL protocols: ").append(resource.disabledSslProtocols().toString()); Map<String, ApplicationGatewayIpConfiguration> ipConfigs = resource.ipConfigurations(); info.append("\n\tIP configurations: ").append(ipConfigs.size()); for (ApplicationGatewayIpConfiguration ipConfig : ipConfigs.values()) { info.append("\n\t\tName: ").append(ipConfig.name()) .append("\n\t\t\tNetwork id: ").append(ipConfig.networkId()) .append("\n\t\t\tSubnet name: ").append(ipConfig.subnetName()); } Map<String, ApplicationGatewayFrontend> frontends = resource.frontends(); info.append("\n\tFrontends: ").append(frontends.size()); for (ApplicationGatewayFrontend frontend : frontends.values()) { info.append("\n\t\tName: ").append(frontend.name()) .append("\n\t\t\tPublic? ").append(frontend.isPublic()); if (frontend.isPublic()) { info.append("\n\t\t\tPublic IP address ID: ").append(frontend.publicIpAddressId()); } if (frontend.isPrivate()) { info.append("\n\t\t\tPrivate IP address: ").append(frontend.privateIpAddress()) .append("\n\t\t\tPrivate IP allocation method: ").append(frontend.privateIpAllocationMethod()) .append("\n\t\t\tSubnet name: ").append(frontend.subnetName()) .append("\n\t\t\tVirtual network ID: ").append(frontend.networkId()); } } Map<String, ApplicationGatewayBackend> backends = resource.backends(); info.append("\n\tBackends: ").append(backends.size()); for (ApplicationGatewayBackend backend : backends.values()) { info.append("\n\t\tName: ").append(backend.name()) .append("\n\t\t\tAssociated NIC IP configuration IDs: ").append(backend.backendNicIPConfigurationNames().keySet()); Collection<ApplicationGatewayBackendAddress> addresses = backend.addresses(); info.append("\n\t\t\tAddresses: ").append(addresses.size()); for (ApplicationGatewayBackendAddress 
address : addresses) { info.append("\n\t\t\t\tFQDN: ").append(address.fqdn()) .append("\n\t\t\t\tIP: ").append(address.ipAddress()); } } Map<String, ApplicationGatewayBackendHttpConfiguration> httpConfigs = resource.backendHttpConfigurations(); info.append("\n\tHTTP Configurations: ").append(httpConfigs.size()); for (ApplicationGatewayBackendHttpConfiguration httpConfig : httpConfigs.values()) { info.append("\n\t\tName: ").append(httpConfig.name()) .append("\n\t\t\tCookie based affinity: ").append(httpConfig.cookieBasedAffinity()) .append("\n\t\t\tPort: ").append(httpConfig.port()) .append("\n\t\t\tRequest timeout in seconds: ").append(httpConfig.requestTimeout()) .append("\n\t\t\tProtocol: ").append(httpConfig.protocol()) .append("\n\t\tHost header: ").append(httpConfig.hostHeader()) .append("\n\t\tHost header comes from backend? ").append(httpConfig.isHostHeaderFromBackend()) .append("\n\t\tConnection draining timeout in seconds: ").append(httpConfig.connectionDrainingTimeoutInSeconds()) .append("\n\t\tAffinity cookie name: ").append(httpConfig.affinityCookieName()) .append("\n\t\tPath: ").append(httpConfig.path()); ApplicationGatewayProbe probe = httpConfig.probe(); if (probe != null) { info.append("\n\t\tProbe: " + probe.name()); } info.append("\n\t\tIs probe enabled? 
").append(httpConfig.isProbeEnabled()); } Map<String, ApplicationGatewaySslCertificate> sslCerts = resource.sslCertificates(); info.append("\n\tSSL certificates: ").append(sslCerts.size()); for (ApplicationGatewaySslCertificate cert : sslCerts.values()) { info.append("\n\t\tName: ").append(cert.name()) .append("\n\t\t\tCert data: ").append(cert.publicData()); } Map<String, ApplicationGatewayRedirectConfiguration> redirects = resource.redirectConfigurations(); info.append("\n\tRedirect configurations: ").append(redirects.size()); for (ApplicationGatewayRedirectConfiguration redirect : redirects.values()) { info.append("\n\t\tName: ").append(redirect.name()) .append("\n\t\tTarget URL: ").append(redirect.type()) .append("\n\t\tTarget URL: ").append(redirect.targetUrl()) .append("\n\t\tTarget listener: ").append(redirect.targetListener() != null ? redirect.targetListener().name() : null) .append("\n\t\tIs path included? ").append(redirect.isPathIncluded()) .append("\n\t\tIs query string included? ").append(redirect.isQueryStringIncluded()) .append("\n\t\tReferencing request routing rules: ").append(redirect.requestRoutingRules().values()); } Map<String, ApplicationGatewayListener> listeners = resource.listeners(); info.append("\n\tHTTP listeners: ").append(listeners.size()); for (ApplicationGatewayListener listener : listeners.values()) { info.append("\n\t\tName: ").append(listener.name()) .append("\n\t\t\tHost name: ").append(listener.hostname()) .append("\n\t\t\tServer name indication required? 
").append(listener.requiresServerNameIndication()) .append("\n\t\t\tAssociated frontend name: ").append(listener.frontend().name()) .append("\n\t\t\tFrontend port name: ").append(listener.frontendPortName()) .append("\n\t\t\tFrontend port number: ").append(listener.frontendPortNumber()) .append("\n\t\t\tProtocol: ").append(listener.protocol().toString()); if (listener.sslCertificate() != null) { info.append("\n\t\t\tAssociated SSL certificate: ").append(listener.sslCertificate().name()); } } Map<String, ApplicationGatewayProbe> probes = resource.probes(); info.append("\n\tProbes: ").append(probes.size()); for (ApplicationGatewayProbe probe : probes.values()) { info.append("\n\t\tName: ").append(probe.name()) .append("\n\t\tProtocol:").append(probe.protocol().toString()) .append("\n\t\tInterval in seconds: ").append(probe.timeBetweenProbesInSeconds()) .append("\n\t\tRetries: ").append(probe.retriesBeforeUnhealthy()) .append("\n\t\tTimeout: ").append(probe.timeoutInSeconds()) .append("\n\t\tHost: ").append(probe.host()) .append("\n\t\tHealthy HTTP response status code ranges: ").append(probe.healthyHttpResponseStatusCodeRanges()) .append("\n\t\tHealthy HTTP response body contents: ").append(probe.healthyHttpResponseBodyContents()); } Map<String, ApplicationGatewayRequestRoutingRule> rules = resource.requestRoutingRules(); info.append("\n\tRequest routing rules: ").append(rules.size()); for (ApplicationGatewayRequestRoutingRule rule : rules.values()) { info.append("\n\t\tName: ").append(rule.name()) .append("\n\t\tType: ").append(rule.ruleType()) .append("\n\t\tPublic IP address ID: ").append(rule.publicIpAddressId()) .append("\n\t\tHost name: ").append(rule.hostname()) .append("\n\t\tServer name indication required? 
").append(rule.requiresServerNameIndication()) .append("\n\t\tFrontend port: ").append(rule.frontendPort()) .append("\n\t\tFrontend protocol: ").append(rule.frontendProtocol().toString()) .append("\n\t\tBackend port: ").append(rule.backendPort()) .append("\n\t\tCookie based affinity enabled? ").append(rule.cookieBasedAffinity()) .append("\n\t\tRedirect configuration: ").append(rule.redirectConfiguration() != null ? rule.redirectConfiguration().name() : "(none)"); Collection<ApplicationGatewayBackendAddress> addresses = rule.backendAddresses(); info.append("\n\t\t\tBackend addresses: ").append(addresses.size()); for (ApplicationGatewayBackendAddress address : addresses) { info.append("\n\t\t\t\t") .append(address.fqdn()) .append(" [").append(address.ipAddress()).append("]"); } info.append("\n\t\t\tSSL certificate name: "); ApplicationGatewaySslCertificate cert = rule.sslCertificate(); if (cert == null) { info.append("(None)"); } else { info.append(cert.name()); } info.append("\n\t\t\tAssociated backend address pool: "); ApplicationGatewayBackend backend = rule.backend(); if (backend == null) { info.append("(None)"); } else { info.append(backend.name()); } info.append("\n\t\t\tAssociated backend HTTP settings configuration: "); ApplicationGatewayBackendHttpConfiguration config = rule.backendHttpConfiguration(); if (config == null) { info.append("(None)"); } else { info.append(config.name()); } info.append("\n\t\t\tAssociated frontend listener: "); ApplicationGatewayListener listener = rule.listener(); if (listener == null) { info.append("(None)"); } else { info.append(config.name()); } } System.out.println(info.toString()); } /** * Prints information of a virtual machine custom image. 
* * @param image the image */ public static void print(VirtualMachineCustomImage image) { StringBuilder builder = new StringBuilder().append("Virtual machine custom image: ").append(image.id()) .append("Name: ").append(image.name()) .append("\n\tResource group: ").append(image.resourceGroupName()) .append("\n\tCreated from virtual machine: ").append(image.sourceVirtualMachineId()); builder.append("\n\tOS disk image: ") .append("\n\t\tOperating system: ").append(image.osDiskImage().osType()) .append("\n\t\tOperating system state: ").append(image.osDiskImage().osState()) .append("\n\t\tCaching: ").append(image.osDiskImage().caching()) .append("\n\t\tSize (GB): ").append(image.osDiskImage().diskSizeGB()); if (image.isCreatedFromVirtualMachine()) { builder.append("\n\t\tSource virtual machine: ").append(image.sourceVirtualMachineId()); } if (image.osDiskImage().managedDisk() != null) { builder.append("\n\t\tSource managed disk: ").append(image.osDiskImage().managedDisk().id()); } if (image.osDiskImage().snapshot() != null) { builder.append("\n\t\tSource snapshot: ").append(image.osDiskImage().snapshot().id()); } if (image.osDiskImage().blobUri() != null) { builder.append("\n\t\tSource un-managed vhd: ").append(image.osDiskImage().blobUri()); } if (image.dataDiskImages() != null) { for (ImageDataDisk diskImage : image.dataDiskImages().values()) { builder.append("\n\tDisk Image (Lun) .append("\n\t\tCaching: ").append(diskImage.caching()) .append("\n\t\tSize (GB): ").append(diskImage.diskSizeGB()); if (image.isCreatedFromVirtualMachine()) { builder.append("\n\t\tSource virtual machine: ").append(image.sourceVirtualMachineId()); } if (diskImage.managedDisk() != null) { builder.append("\n\t\tSource managed disk: ").append(diskImage.managedDisk().id()); } if (diskImage.snapshot() != null) { builder.append("\n\t\tSource snapshot: ").append(diskImage.snapshot().id()); } if (diskImage.blobUri() != null) { builder.append("\n\t\tSource un-managed vhd: 
").append(diskImage.blobUri()); } } } System.out.println(builder.toString()); } /** * Uploads a file to an Azure app service for Web App. * * @param profile the publishing profile for the app service. * @param fileName the name of the file on server * @param file the local file */ public static void uploadFileViaFtp(PublishingProfile profile, String fileName, InputStream file) { String path = "./site/wwwroot/webapps"; uploadFileViaFtp(profile, fileName, file, path); } /** * Uploads a file to an Azure app service for Function App. * * @param profile the publishing profile for the app service. * @param fileName the name of the file on server * @param file the local file */ public static void uploadFileForFunctionViaFtp(PublishingProfile profile, String fileName, InputStream file) { String path = "./site/wwwroot"; uploadFileViaFtp(profile, fileName, file, path); } private static void uploadFileViaFtp(PublishingProfile profile, String fileName, InputStream file, String path) { FTPClient ftpClient = new FTPClient(); String[] ftpUrlSegments = profile.ftpUrl().split("/", 2); String server = ftpUrlSegments[0]; if (fileName.contains("/")) { int lastslash = fileName.lastIndexOf('/'); path = path + "/" + fileName.substring(0, lastslash); fileName = fileName.substring(lastslash + 1); } try { ftpClient.connect(server); ftpClient.enterLocalPassiveMode(); ftpClient.login(profile.ftpUsername(), profile.ftpPassword()); ftpClient.setFileType(FTP.BINARY_FILE_TYPE); for (String segment : path.split("/")) { if (!ftpClient.changeWorkingDirectory(segment)) { ftpClient.makeDirectory(segment); ftpClient.changeWorkingDirectory(segment); } } ftpClient.storeFile(fileName, file); ftpClient.disconnect(); } catch (IOException e) { e.printStackTrace(); } } /** * Print service bus namespace info. 
* * @param serviceBusNamespace a service bus namespace */ public static void print(ServiceBusNamespace serviceBusNamespace) { StringBuilder builder = new StringBuilder() .append("Service bus Namespace: ").append(serviceBusNamespace.id()) .append("\n\tName: ").append(serviceBusNamespace.name()) .append("\n\tRegion: ").append(serviceBusNamespace.regionName()) .append("\n\tResourceGroupName: ").append(serviceBusNamespace.resourceGroupName()) .append("\n\tCreatedAt: ").append(serviceBusNamespace.createdAt()) .append("\n\tUpdatedAt: ").append(serviceBusNamespace.updatedAt()) .append("\n\tDnsLabel: ").append(serviceBusNamespace.dnsLabel()) .append("\n\tFQDN: ").append(serviceBusNamespace.fqdn()) .append("\n\tSku: ") .append("\n\t\tCapacity: ").append(serviceBusNamespace.sku().capacity()) .append("\n\t\tSkuName: ").append(serviceBusNamespace.sku().name()) .append("\n\t\tTier: ").append(serviceBusNamespace.sku().tier()); System.out.println(builder.toString()); } /** * Print service bus queue info. 
 *
 * @param queue a service bus queue
 */
public static void print(Queue queue) {
    StringBuilder builder = new StringBuilder()
        .append("Service bus Queue: ").append(queue.id())
        .append("\n\tName: ").append(queue.name())
        .append("\n\tResourceGroupName: ").append(queue.resourceGroupName())
        .append("\n\tCreatedAt: ").append(queue.createdAt())
        .append("\n\tUpdatedAt: ").append(queue.updatedAt())
        .append("\n\tAccessedAt: ").append(queue.accessedAt())
        .append("\n\tActiveMessageCount: ").append(queue.activeMessageCount())
        .append("\n\tCurrentSizeInBytes: ").append(queue.currentSizeInBytes())
        .append("\n\tDeadLetterMessageCount: ").append(queue.deadLetterMessageCount())
        .append("\n\tDefaultMessageTtlDuration: ").append(queue.defaultMessageTtlDuration())
        .append("\n\tDuplicateMessageDetectionHistoryDuration: ").append(queue.duplicateMessageDetectionHistoryDuration())
        .append("\n\tIsBatchedOperationsEnabled: ").append(queue.isBatchedOperationsEnabled())
        .append("\n\tIsDeadLetteringEnabledForExpiredMessages: ").append(queue.isDeadLetteringEnabledForExpiredMessages())
        .append("\n\tIsDuplicateDetectionEnabled: ").append(queue.isDuplicateDetectionEnabled())
        .append("\n\tIsExpressEnabled: ").append(queue.isExpressEnabled())
        .append("\n\tIsPartitioningEnabled: ").append(queue.isPartitioningEnabled())
        .append("\n\tIsSessionEnabled: ").append(queue.isSessionEnabled())
        .append("\n\tDeleteOnIdleDurationInMinutes: ").append(queue.deleteOnIdleDurationInMinutes())
        .append("\n\tMaxDeliveryCountBeforeDeadLetteringMessage: ").append(queue.maxDeliveryCountBeforeDeadLetteringMessage())
        .append("\n\tMaxSizeInMB: ").append(queue.maxSizeInMB())
        .append("\n\tMessageCount: ").append(queue.messageCount())
        .append("\n\tScheduledMessageCount: ").append(queue.scheduledMessageCount())
        .append("\n\tStatus: ").append(queue.status())
        .append("\n\tTransferMessageCount: ").append(queue.transferMessageCount())
        .append("\n\tLockDurationInSeconds: ").append(queue.lockDurationInSeconds())
        .append("\n\tTransferDeadLetterMessageCount: ").append(queue.transferDeadLetterMessageCount());
    System.out.println(builder.toString());
}

/**
 * Print service bus queue authorization keys info.
 *
 * @param queueAuthorizationRule a service bus queue authorization keys
 */
public static void print(QueueAuthorizationRule queueAuthorizationRule) {
    StringBuilder builder = new StringBuilder()
        .append("Service bus queue authorization rule: ").append(queueAuthorizationRule.id())
        .append("\n\tName: ").append(queueAuthorizationRule.name())
        .append("\n\tResourceGroupName: ").append(queueAuthorizationRule.resourceGroupName())
        .append("\n\tNamespace Name: ").append(queueAuthorizationRule.namespaceName())
        .append("\n\tQueue Name: ").append(queueAuthorizationRule.queueName());
    // One sub-entry per granted access right.
    List<com.azure.resourcemanager.servicebus.models.AccessRights> rights = queueAuthorizationRule.rights();
    builder.append("\n\tNumber of access rights in queue: ").append(rights.size());
    for (com.azure.resourcemanager.servicebus.models.AccessRights right : rights) {
        builder.append("\n\t\tAccessRight: ")
            .append("\n\t\t\tName :").append(right.name());
    }
    System.out.println(builder.toString());
}

/**
 * Print service bus namespace authorization keys info.
 *
 * @param keys a service bus namespace authorization keys
 */
public static void print(AuthorizationKeys keys) {
    StringBuilder builder = new StringBuilder()
        .append("Authorization keys: ")
        .append("\n\tPrimaryKey: ").append(keys.primaryKey())
        .append("\n\tPrimaryConnectionString: ").append(keys.primaryConnectionString())
        .append("\n\tSecondaryKey: ").append(keys.secondaryKey())
        .append("\n\tSecondaryConnectionString: ").append(keys.secondaryConnectionString());
    System.out.println(builder.toString());
}

/**
 * Print service bus namespace authorization rule info.
* * @param namespaceAuthorizationRule a service bus namespace authorization rule */ public static void print(NamespaceAuthorizationRule namespaceAuthorizationRule) { StringBuilder builder = new StringBuilder() .append("Service bus queue authorization rule: ").append(namespaceAuthorizationRule.id()) .append("\n\tName: ").append(namespaceAuthorizationRule.name()) .append("\n\tResourceGroupName: ").append(namespaceAuthorizationRule.resourceGroupName()) .append("\n\tNamespace Name: ").append(namespaceAuthorizationRule.namespaceName()); List<com.azure.resourcemanager.servicebus.models.AccessRights> rights = namespaceAuthorizationRule.rights(); builder.append("\n\tNumber of access rights in queue: ").append(rights.size()); for (com.azure.resourcemanager.servicebus.models.AccessRights right : rights) { builder.append("\n\t\tAccessRight: ") .append("\n\t\t\tName :").append(right.name()); } System.out.println(builder.toString()); } /** * Print service bus topic info. * * @param topic a service bus topic */ public static void print(Topic topic) { StringBuilder builder = new StringBuilder() .append("Service bus topic: ").append(topic.id()) .append("\n\tName: ").append(topic.name()) .append("\n\tResourceGroupName: ").append(topic.resourceGroupName()) .append("\n\tCreatedAt: ").append(topic.createdAt()) .append("\n\tUpdatedAt: ").append(topic.updatedAt()) .append("\n\tAccessedAt: ").append(topic.accessedAt()) .append("\n\tActiveMessageCount: ").append(topic.activeMessageCount()) .append("\n\tCurrentSizeInBytes: ").append(topic.currentSizeInBytes()) .append("\n\tDeadLetterMessageCount: ").append(topic.deadLetterMessageCount()) .append("\n\tDefaultMessageTtlDuration: ").append(topic.defaultMessageTtlDuration()) .append("\n\tDuplicateMessageDetectionHistoryDuration: ").append(topic.duplicateMessageDetectionHistoryDuration()) .append("\n\tIsBatchedOperationsEnabled: ").append(topic.isBatchedOperationsEnabled()) .append("\n\tIsDuplicateDetectionEnabled: 
").append(topic.isDuplicateDetectionEnabled()) .append("\n\tIsExpressEnabled: ").append(topic.isExpressEnabled()) .append("\n\tIsPartitioningEnabled: ").append(topic.isPartitioningEnabled()) .append("\n\tDeleteOnIdleDurationInMinutes: ").append(topic.deleteOnIdleDurationInMinutes()) .append("\n\tMaxSizeInMB: ").append(topic.maxSizeInMB()) .append("\n\tScheduledMessageCount: ").append(topic.scheduledMessageCount()) .append("\n\tStatus: ").append(topic.status()) .append("\n\tTransferMessageCount: ").append(topic.transferMessageCount()) .append("\n\tSubscriptionCount: ").append(topic.subscriptionCount()) .append("\n\tTransferDeadLetterMessageCount: ").append(topic.transferDeadLetterMessageCount()); System.out.println(builder.toString()); } /** * Print service bus subscription info. * * @param serviceBusSubscription a service bus subscription */ public static void print(ServiceBusSubscription serviceBusSubscription) { StringBuilder builder = new StringBuilder() .append("Service bus subscription: ").append(serviceBusSubscription.id()) .append("\n\tName: ").append(serviceBusSubscription.name()) .append("\n\tResourceGroupName: ").append(serviceBusSubscription.resourceGroupName()) .append("\n\tCreatedAt: ").append(serviceBusSubscription.createdAt()) .append("\n\tUpdatedAt: ").append(serviceBusSubscription.updatedAt()) .append("\n\tAccessedAt: ").append(serviceBusSubscription.accessedAt()) .append("\n\tActiveMessageCount: ").append(serviceBusSubscription.activeMessageCount()) .append("\n\tDeadLetterMessageCount: ").append(serviceBusSubscription.deadLetterMessageCount()) .append("\n\tDefaultMessageTtlDuration: ").append(serviceBusSubscription.defaultMessageTtlDuration()) .append("\n\tIsBatchedOperationsEnabled: ").append(serviceBusSubscription.isBatchedOperationsEnabled()) .append("\n\tDeleteOnIdleDurationInMinutes: ").append(serviceBusSubscription.deleteOnIdleDurationInMinutes()) .append("\n\tScheduledMessageCount: ").append(serviceBusSubscription.scheduledMessageCount()) 
.append("\n\tStatus: ").append(serviceBusSubscription.status()) .append("\n\tTransferMessageCount: ").append(serviceBusSubscription.transferMessageCount()) .append("\n\tIsDeadLetteringEnabledForExpiredMessages: ").append(serviceBusSubscription.isDeadLetteringEnabledForExpiredMessages()) .append("\n\tIsSessionEnabled: ").append(serviceBusSubscription.isSessionEnabled()) .append("\n\tLockDurationInSeconds: ").append(serviceBusSubscription.lockDurationInSeconds()) .append("\n\tMaxDeliveryCountBeforeDeadLetteringMessage: ").append(serviceBusSubscription.maxDeliveryCountBeforeDeadLetteringMessage()) .append("\n\tIsDeadLetteringEnabledForFilterEvaluationFailedMessages: ").append(serviceBusSubscription.isDeadLetteringEnabledForFilterEvaluationFailedMessages()) .append("\n\tTransferMessageCount: ").append(serviceBusSubscription.transferMessageCount()) .append("\n\tTransferDeadLetterMessageCount: ").append(serviceBusSubscription.transferDeadLetterMessageCount()); System.out.println(builder.toString()); } /** * Print topic Authorization Rule info. 
* * @param topicAuthorizationRule a topic Authorization Rule */ public static void print(TopicAuthorizationRule topicAuthorizationRule) { StringBuilder builder = new StringBuilder() .append("Service bus topic authorization rule: ").append(topicAuthorizationRule.id()) .append("\n\tName: ").append(topicAuthorizationRule.name()) .append("\n\tResourceGroupName: ").append(topicAuthorizationRule.resourceGroupName()) .append("\n\tNamespace Name: ").append(topicAuthorizationRule.namespaceName()) .append("\n\tTopic Name: ").append(topicAuthorizationRule.topicName()); List<com.azure.resourcemanager.servicebus.models.AccessRights> rights = topicAuthorizationRule.rights(); builder.append("\n\tNumber of access rights in queue: ").append(rights.size()); for (com.azure.resourcemanager.servicebus.models.AccessRights right : rights) { builder.append("\n\t\tAccessRight: ") .append("\n\t\t\tName :").append(right.name()); } System.out.println(builder.toString()); } /** * Print CosmosDB info. * * @param cosmosDBAccount a CosmosDB */ public static void print(CosmosDBAccount cosmosDBAccount) { StringBuilder builder = new StringBuilder() .append("CosmosDB: ").append(cosmosDBAccount.id()) .append("\n\tName: ").append(cosmosDBAccount.name()) .append("\n\tResourceGroupName: ").append(cosmosDBAccount.resourceGroupName()) .append("\n\tKind: ").append(cosmosDBAccount.kind().toString()) .append("\n\tDefault consistency level: ").append(cosmosDBAccount.consistencyPolicy().defaultConsistencyLevel()) .append("\n\tIP range filter: ").append(cosmosDBAccount.ipRangeFilter()); DatabaseAccountListKeysResult keys = cosmosDBAccount.listKeys(); DatabaseAccountListReadOnlyKeysResult readOnlyKeys = cosmosDBAccount.listReadOnlyKeys(); builder .append("\n\tPrimary Master Key: ").append(keys.primaryMasterKey()) .append("\n\tSecondary Master Key: ").append(keys.secondaryMasterKey()) .append("\n\tPrimary Read-Only Key: ").append(readOnlyKeys.primaryReadonlyMasterKey()) .append("\n\tSecondary Read-Only Key: 
").append(readOnlyKeys.secondaryReadonlyMasterKey()); for (Location writeReplica : cosmosDBAccount.writableReplications()) { builder.append("\n\t\tWrite replication: ") .append("\n\t\t\tName :").append(writeReplica.locationName()); } builder.append("\n\tNumber of read replications: ").append(cosmosDBAccount.readableReplications().size()); for (Location readReplica : cosmosDBAccount.readableReplications()) { builder.append("\n\t\tRead replication: ") .append("\n\t\t\tName :").append(readReplica.locationName()); } } /** * Print Active Directory User info. * * @param user active directory user */ public static void print(ActiveDirectoryUser user) { StringBuilder builder = new StringBuilder() .append("Active Directory User: ").append(user.id()) .append("\n\tName: ").append(user.name()) .append("\n\tMail: ").append(user.mail()) .append("\n\tMail Nickname: ").append(user.mailNickname()) .append("\n\tUser Principal Name: ").append(user.userPrincipalName()); System.out.println(builder.toString()); } /** * Print Active Directory User info. 
* * @param role role definition */ public static void print(RoleDefinition role) { StringBuilder builder = new StringBuilder() .append("Role Definition: ").append(role.id()) .append("\n\tName: ").append(role.name()) .append("\n\tRole Name: ").append(role.roleName()) .append("\n\tType: ").append(role.type()) .append("\n\tDescription: ").append(role.description()) .append("\n\tType: ").append(role.type()); Set<Permission> permissions = role.permissions(); builder.append("\n\tPermissions: ").append(permissions.size()); for (Permission permission : permissions) { builder.append("\n\t\tPermission Actions: " + permission.actions().size()); for (String action : permission.actions()) { builder.append("\n\t\t\tName :").append(action); } builder.append("\n\t\tPermission Not Actions: " + permission.notActions().size()); for (String notAction : permission.notActions()) { builder.append("\n\t\t\tName :").append(notAction); } builder.append("\n\t\tPermission Data Actions: " + permission.dataActions().size()); for (String notAction : permission.dataActions()) { builder.append("\n\t\t\tName :").append(notAction); } } Set<String> assignableScopes = role.assignableScopes(); builder.append("\n\tAssignable scopes: ").append(assignableScopes.size()); for (String scope : assignableScopes) { builder.append("\n\t\tAssignable Scope: ") .append("\n\t\t\tName :").append(scope); } System.out.println(builder.toString()); } /** * Print Role Assignment info. * * @param roleAssignment role assignment */ public static void print(RoleAssignment roleAssignment) { StringBuilder builder = new StringBuilder() .append("Role Assignment: ") .append("\n\tScope: ").append(roleAssignment.scope()) .append("\n\tPrincipal Id: ").append(roleAssignment.principalId()) .append("\n\tRole Definition Id: ").append(roleAssignment.roleDefinitionId()); System.out.println(builder.toString()); } /** * Print Active Directory Group info. 
* * @param group active directory group */ public static void print(ActiveDirectoryGroup group) { StringBuilder builder = new StringBuilder() .append("Active Directory Group: ").append(group.id()) .append("\n\tName: ").append(group.name()) .append("\n\tMail: ").append(group.mail()) .append("\n\tSecurity Enabled: ").append(group.securityEnabled()) .append("\n\tGroup members:"); for (ActiveDirectoryObject object : group.listMembers()) { builder.append("\n\t\tType: ").append(object.getClass().getSimpleName()) .append("\tName: ").append(object.name()); } System.out.println(builder.toString()); } /** * Print Active Directory Application info. * * @param application active directory application */ public static void print(ActiveDirectoryApplication application) { StringBuilder builder = new StringBuilder() .append("Active Directory Application: ").append(application.id()) .append("\n\tName: ").append(application.name()) .append("\n\tSign on URL: ").append(application.signOnUrl()) .append("\n\tReply URLs:"); for (String replyUrl : application.replyUrls()) { builder.append("\n\t\t").append(replyUrl); } System.out.println(builder.toString()); } /** * Print Service Principal info. * * @param servicePrincipal service principal */ public static void print(ServicePrincipal servicePrincipal) { StringBuilder builder = new StringBuilder() .append("Service Principal: ").append(servicePrincipal.id()) .append("\n\tName: ").append(servicePrincipal.name()) .append("\n\tApplication Id: ").append(servicePrincipal.applicationId()); List<String> names = servicePrincipal.servicePrincipalNames(); builder.append("\n\tNames: ").append(names.size()); for (String name : names) { builder.append("\n\t\tName: ").append(name); } System.out.println(builder.toString()); } /** * Print Network Watcher info. 
* * @param nw network watcher */ public static void print(NetworkWatcher nw) { StringBuilder builder = new StringBuilder() .append("Network Watcher: ").append(nw.id()) .append("\n\tName: ").append(nw.name()) .append("\n\tResource group name: ").append(nw.resourceGroupName()) .append("\n\tRegion name: ").append(nw.regionName()); System.out.println(builder.toString()); } /** * Print packet capture info. * * @param resource packet capture */ public static void print(PacketCapture resource) { StringBuilder sb = new StringBuilder().append("Packet Capture: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tTarget id: ").append(resource.targetId()) .append("\n\tTime limit in seconds: ").append(resource.timeLimitInSeconds()) .append("\n\tBytes to capture per packet: ").append(resource.bytesToCapturePerPacket()) .append("\n\tProvisioning state: ").append(resource.provisioningState()) .append("\n\tStorage location:") .append("\n\tStorage account id: ").append(resource.storageLocation().storageId()) .append("\n\tStorage account path: ").append(resource.storageLocation().storagePath()) .append("\n\tFile path: ").append(resource.storageLocation().filePath()) .append("\n\t Packet capture filters: ").append(resource.filters().size()); for (PacketCaptureFilter filter : resource.filters()) { sb.append("\n\t\tProtocol: ").append(filter.protocol()); sb.append("\n\t\tLocal IP address: ").append(filter.localIpAddress()); sb.append("\n\t\tRemote IP address: ").append(filter.remoteIpAddress()); sb.append("\n\t\tLocal port: ").append(filter.localPort()); sb.append("\n\t\tRemote port: ").append(filter.remotePort()); } System.out.println(sb.toString()); } /** * Print verification IP flow info. 
* * @param resource IP flow verification info */ public static void print(VerificationIPFlow resource) { System.out.println(new StringBuilder("IP flow verification: ") .append("\n\tAccess: ").append(resource.access()) .append("\n\tRule name: ").append(resource.ruleName()) .toString()); } /** * Print topology info. * * @param resource topology */ public static void print(Topology resource) { StringBuilder sb = new StringBuilder().append("Topology: ").append(resource.id()) .append("\n\tTopology parameters: ") .append("\n\t\tResource group: ").append(resource.topologyParameters().targetResourceGroupName()) .append("\n\t\tVirtual network: ").append(resource.topologyParameters().targetVirtualNetwork() == null ? "" : resource.topologyParameters().targetVirtualNetwork().id()) .append("\n\t\tSubnet id: ").append(resource.topologyParameters().targetSubnet() == null ? "" : resource.topologyParameters().targetSubnet().id()) .append("\n\tCreated time: ").append(resource.createdTime()) .append("\n\tLast modified time: ").append(resource.lastModifiedTime()); for (TopologyResource tr : resource.resources().values()) { sb.append("\n\tTopology resource: ").append(tr.id()) .append("\n\t\tName: ").append(tr.name()) .append("\n\t\tLocation: ").append(tr.location()) .append("\n\t\tAssociations:"); for (TopologyAssociation association : tr.associations()) { sb.append("\n\t\t\tName:").append(association.name()) .append("\n\t\t\tResource id:").append(association.resourceId()) .append("\n\t\t\tAssociation type:").append(association.associationType()); } } System.out.println(sb.toString()); } /** * Print flow log settings info. 
* * @param resource flow log settings */ public static void print(FlowLogSettings resource) { System.out.println(new StringBuilder().append("Flow log settings: ") .append("Target resource id: ").append(resource.targetResourceId()) .append("\n\tFlow log enabled: ").append(resource.enabled()) .append("\n\tStorage account id: ").append(resource.storageId()) .append("\n\tRetention policy enabled: ").append(resource.isRetentionEnabled()) .append("\n\tRetention policy days: ").append(resource.retentionDays()) .toString()); } /** * Print availability set info. * * @param resource an availability set */ public static void print(SecurityGroupView resource) { StringBuilder sb = new StringBuilder().append("Security group view: ") .append("\n\tVirtual machine id: ").append(resource.vmId()); for (SecurityGroupNetworkInterface sgni : resource.networkInterfaces().values()) { sb.append("\n\tSecurity group network interface:").append(sgni.id()) .append("\n\t\tSecurity group network interface:") .append("\n\t\tEffective security rules:"); for (EffectiveNetworkSecurityRule rule : sgni.securityRuleAssociations().effectiveSecurityRules()) { sb.append("\n\t\t\tName: ").append(rule.name()) .append("\n\t\t\tDirection: ").append(rule.direction()) .append("\n\t\t\tAccess: ").append(rule.access()) .append("\n\t\t\tPriority: ").append(rule.priority()) .append("\n\t\t\tSource address prefix: ").append(rule.sourceAddressPrefix()) .append("\n\t\t\tSource port range: ").append(rule.sourcePortRange()) .append("\n\t\t\tDestination address prefix: ").append(rule.destinationAddressPrefix()) .append("\n\t\t\tDestination port range: ").append(rule.destinationPortRange()) .append("\n\t\t\tProtocol: ").append(rule.protocol()); } sb.append("\n\t\tSubnet:").append(sgni.securityRuleAssociations().subnetAssociation().id()); printSecurityRule(sb, sgni.securityRuleAssociations().subnetAssociation().securityRules()); if (sgni.securityRuleAssociations().networkInterfaceAssociation() != null) { 
sb.append("\n\t\tNetwork interface:").append(sgni.securityRuleAssociations().networkInterfaceAssociation().id()); printSecurityRule(sb, sgni.securityRuleAssociations().networkInterfaceAssociation().securityRules()); } sb.append("\n\t\tDefault security rules:"); printSecurityRule(sb, sgni.securityRuleAssociations().defaultSecurityRules()); } System.out.println(sb.toString()); } private static void printSecurityRule(StringBuilder sb, List<SecurityRuleInner> rules) { for (SecurityRuleInner rule : rules) { sb.append("\n\t\t\tName: ").append(rule.name()) .append("\n\t\t\tDirection: ").append(rule.direction()) .append("\n\t\t\tAccess: ").append(rule.access()) .append("\n\t\t\tPriority: ").append(rule.priority()) .append("\n\t\t\tSource address prefix: ").append(rule.sourceAddressPrefix()) .append("\n\t\t\tSource port range: ").append(rule.sourcePortRange()) .append("\n\t\t\tDestination address prefix: ").append(rule.destinationAddressPrefix()) .append("\n\t\t\tDestination port range: ").append(rule.destinationPortRange()) .append("\n\t\t\tProtocol: ").append(rule.protocol()) .append("\n\t\t\tDescription: ").append(rule.description()) .append("\n\t\t\tProvisioning state: ").append(rule.provisioningState()); } } /** * Print next hop info. * * @param resource an availability set */ public static void print(NextHop resource) { System.out.println(new StringBuilder("Next hop: ") .append("Next hop type: ").append(resource.nextHopType()) .append("\n\tNext hop ip address: ").append(resource.nextHopIpAddress()) .append("\n\tRoute table id: ").append(resource.routeTableId()) .toString()); } /** * Print container group info. 
* * @param resource a container group */ public static void print(ContainerGroup resource) { StringBuilder info = new StringBuilder().append("Container Group: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tOS type: ").append(resource.osType()); if (resource.ipAddress() != null) { info.append("\n\tPublic IP address: ").append(resource.ipAddress()); } if (resource.externalTcpPorts() != null) { info.append("\n\tExternal TCP ports:"); for (int port : resource.externalTcpPorts()) { info.append(" ").append(port); } } if (resource.externalUdpPorts() != null) { info.append("\n\tExternal UDP ports:"); for (int port : resource.externalUdpPorts()) { info.append(" ").append(port); } } if (resource.imageRegistryServers() != null) { info.append("\n\tPrivate Docker image registries:"); for (String server : resource.imageRegistryServers()) { info.append(" ").append(server); } } if (resource.volumes() != null) { info.append("\n\tVolume mapping: "); for (Map.Entry<String, Volume> entry : resource.volumes().entrySet()) { info.append("\n\t\tName: ").append(entry.getKey()).append(" -> ") .append(entry.getValue().azureFile() != null ? 
entry.getValue().azureFile().shareName() : "empty direcory volume"); } } if (resource.containers() != null) { info.append("\n\tContainer instances: "); for (Map.Entry<String, Container> entry : resource.containers().entrySet()) { Container container = entry.getValue(); info.append("\n\t\tName: ").append(entry.getKey()).append(" -> ").append(container.image()); info.append("\n\t\t\tResources: "); info.append(container.resources().requests().cpu()).append("CPUs "); info.append(container.resources().requests().memoryInGB()).append("GB"); info.append("\n\t\t\tPorts:"); for (ContainerPort port : container.ports()) { info.append(" ").append(port.port()); } if (container.volumeMounts() != null) { info.append("\n\t\t\tVolume mounts:"); for (VolumeMount volumeMount : container.volumeMounts()) { info.append(" ").append(volumeMount.name()).append("->").append(volumeMount.mountPath()); } } if (container.command() != null) { info.append("\n\t\t\tStart commands:"); for (String command : container.command()) { info.append("\n\t\t\t\t").append(command); } } if (container.environmentVariables() != null) { info.append("\n\t\t\tENV vars:"); for (EnvironmentVariable envVar : container.environmentVariables()) { info.append("\n\t\t\t\t").append(envVar.name()).append("=").append(envVar.value()); } } } } System.out.println(info.toString()); } /** * Print event hub namespace. 
* * @param resource a virtual machine */ public static void print(EventHubNamespace resource) { StringBuilder info = new StringBuilder(); info.append("Eventhub Namespace: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tAzureInsightMetricId: ").append(resource.azureInsightMetricId()) .append("\n\tIsAutoScale enabled: ").append(resource.isAutoScaleEnabled()) .append("\n\tServiceBus endpoint: ").append(resource.serviceBusEndpoint()) .append("\n\tThroughPut upper limit: ").append(resource.throughputUnitsUpperLimit()) .append("\n\tCurrent ThroughPut: ").append(resource.currentThroughputUnits()) .append("\n\tCreated time: ").append(resource.createdAt()) .append("\n\tUpdated time: ").append(resource.updatedAt()); System.out.println(info.toString()); } /** * Print event hub. * * @param resource event hub */ public static void print(EventHub resource) { StringBuilder info = new StringBuilder(); info.append("Eventhub: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tNamespace resource group: ").append(resource.namespaceResourceGroupName()) .append("\n\tNamespace: ").append(resource.namespaceName()) .append("\n\tIs data capture enabled: ").append(resource.isDataCaptureEnabled()) .append("\n\tPartition ids: ").append(resource.partitionIds()); if (resource.isDataCaptureEnabled()) { info.append("\n\t\t\tData capture window size in MB: ").append(resource.dataCaptureWindowSizeInMB()); info.append("\n\t\t\tData capture window size in seconds: ").append(resource.dataCaptureWindowSizeInSeconds()); if (resource.captureDestination() != null) { info.append("\n\t\t\tData capture storage account: ").append(resource.captureDestination().storageAccountResourceId()); info.append("\n\t\t\tData capture storage container: ").append(resource.captureDestination().blobContainer()); } } System.out.println(info.toString()); } /** * 
Print event hub namespace recovery pairing.
 *
 * @param resource event hub namespace disaster recovery pairing
 */
public static void print(EventHubDisasterRecoveryPairing resource) {
    // Build the pairing summary with plain string concatenation.
    String summary = "DisasterRecoveryPairing: " + resource.id()
        + "\n\tName: " + resource.name()
        + "\n\tPrimary namespace resource group name: " + resource.primaryNamespaceResourceGroupName()
        + "\n\tPrimary namespace name: " + resource.primaryNamespaceName()
        + "\n\tSecondary namespace: " + resource.secondaryNamespaceId()
        + "\n\tNamespace role: " + resource.namespaceRole();
    System.out.println(summary);
}

/**
 * Print event hub namespace recovery pairing auth rules.
 *
 * @param resource event hub namespace disaster recovery pairing auth rule
 */
public static void print(DisasterRecoveryPairingAuthorizationRule resource) {
    StringBuilder output = new StringBuilder("DisasterRecoveryPairing auth rule: ").append(resource.name());
    // Collect the granted rights as their string forms before printing.
    List<String> grantedRights = new ArrayList<>();
    for (AccessRights right : resource.rights()) {
        grantedRights.add(right.toString());
    }
    output.append("\n\tRights: ").append(grantedRights);
    System.out.println(output.toString());
}

/**
 * Print event hub namespace recovery pairing auth rule key.
* * @param resource event hub namespace disaster recovery pairing auth rule key */ public static void print(DisasterRecoveryPairingAuthorizationKey resource) { StringBuilder info = new StringBuilder(); info.append("DisasterRecoveryPairing auth key: ") .append("\n\t Alias primary connection string: ").append(resource.aliasPrimaryConnectionString()) .append("\n\t Alias secondary connection string: ").append(resource.aliasSecondaryConnectionString()) .append("\n\t Primary key: ").append(resource.primaryKey()) .append("\n\t Secondary key: ").append(resource.secondaryKey()) .append("\n\t Primary connection string: ").append(resource.primaryConnectionString()) .append("\n\t Secondary connection string: ").append(resource.secondaryConnectionString()); System.out.println(info.toString()); } /** * Print event hub consumer group. * * @param resource event hub consumer group */ public static void print(EventHubConsumerGroup resource) { StringBuilder info = new StringBuilder(); info.append("Event hub consumer group: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tNamespace resource group: ").append(resource.namespaceResourceGroupName()) .append("\n\tNamespace: ").append(resource.namespaceName()) .append("\n\tEvent hub name: ").append(resource.eventHubName()) .append("\n\tUser metadata: ").append(resource.userMetadata()); System.out.println(info.toString()); } /** * Print Diagnostic Setting. 
* * @param resource Diagnostic Setting instance */ public static void print(DiagnosticSetting resource) { StringBuilder info = new StringBuilder("Diagnostic Setting: ") .append("\n\tId: ").append(resource.id()) .append("\n\tAssociated resource Id: ").append(resource.resourceId()) .append("\n\tName: ").append(resource.name()) .append("\n\tStorage Account Id: ").append(resource.storageAccountId()) .append("\n\tEventHub Namespace Autorization Rule Id: ").append(resource.eventHubAuthorizationRuleId()) .append("\n\tEventHub name: ").append(resource.eventHubName()) .append("\n\tLog Analytics workspace Id: ").append(resource.workspaceId()); if (resource.logs() != null && !resource.logs().isEmpty()) { info.append("\n\tLog Settings: "); for (LogSettings ls : resource.logs()) { info.append("\n\t\tCategory: ").append(ls.category()); info.append("\n\t\tRetention policy: "); if (ls.retentionPolicy() != null) { info.append(ls.retentionPolicy().days() + " days"); } else { info.append("NONE"); } } } if (resource.metrics() != null && !resource.metrics().isEmpty()) { info.append("\n\tMetric Settings: "); for (MetricSettings ls : resource.metrics()) { info.append("\n\t\tCategory: ").append(ls.category()); info.append("\n\t\tTimegrain: ").append(ls.timeGrain()); info.append("\n\t\tRetention policy: "); if (ls.retentionPolicy() != null) { info.append(ls.retentionPolicy().days() + " days"); } else { info.append("NONE"); } } } System.out.println(info.toString()); } /** * Print Action group settings. 
* * @param actionGroup action group instance */ public static void print(ActionGroup actionGroup) { StringBuilder info = new StringBuilder("Action Group: ") .append("\n\tId: ").append(actionGroup.id()) .append("\n\tName: ").append(actionGroup.name()) .append("\n\tShort Name: ").append(actionGroup.shortName()); if (actionGroup.emailReceivers() != null && !actionGroup.emailReceivers().isEmpty()) { info.append("\n\tEmail receivers: "); for (EmailReceiver er : actionGroup.emailReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tEMail: ").append(er.emailAddress()); info.append("\n\t\tStatus: ").append(er.status()); info.append("\n\t\t==="); } } if (actionGroup.smsReceivers() != null && !actionGroup.smsReceivers().isEmpty()) { info.append("\n\tSMS text message receivers: "); for (SmsReceiver er : actionGroup.smsReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tPhone: ").append(er.countryCode() + er.phoneNumber()); info.append("\n\t\tStatus: ").append(er.status()); info.append("\n\t\t==="); } } if (actionGroup.webhookReceivers() != null && !actionGroup.webhookReceivers().isEmpty()) { info.append("\n\tWebhook receivers: "); for (WebhookReceiver er : actionGroup.webhookReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tURI: ").append(er.serviceUri()); info.append("\n\t\t==="); } } if (actionGroup.pushNotificationReceivers() != null && !actionGroup.pushNotificationReceivers().isEmpty()) { info.append("\n\tApp Push Notification receivers: "); for (AzureAppPushReceiver er : actionGroup.pushNotificationReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tEmail: ").append(er.emailAddress()); info.append("\n\t\t==="); } } if (actionGroup.voiceReceivers() != null && !actionGroup.voiceReceivers().isEmpty()) { info.append("\n\tVoice Message receivers: "); for (VoiceReceiver er : actionGroup.voiceReceivers()) { info.append("\n\t\tName: ").append(er.name()); 
info.append("\n\t\tPhone: ").append(er.countryCode() + er.phoneNumber()); info.append("\n\t\t==="); } } if (actionGroup.automationRunbookReceivers() != null && !actionGroup.automationRunbookReceivers().isEmpty()) { info.append("\n\tAutomation Runbook receivers: "); for (AutomationRunbookReceiver er : actionGroup.automationRunbookReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tRunbook Name: ").append(er.runbookName()); info.append("\n\t\tAccount Id: ").append(er.automationAccountId()); info.append("\n\t\tIs Global: ").append(er.isGlobalRunbook()); info.append("\n\t\tService URI: ").append(er.serviceUri()); info.append("\n\t\tWebhook resource Id: ").append(er.webhookResourceId()); info.append("\n\t\t==="); } } if (actionGroup.azureFunctionReceivers() != null && !actionGroup.azureFunctionReceivers().isEmpty()) { info.append("\n\tAzure Functions receivers: "); for (AzureFunctionReceiver er : actionGroup.azureFunctionReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tFunction Name: ").append(er.functionName()); info.append("\n\t\tFunction App Resource Id: ").append(er.functionAppResourceId()); info.append("\n\t\tFunction Trigger URI: ").append(er.httpTriggerUrl()); info.append("\n\t\t==="); } } if (actionGroup.logicAppReceivers() != null && !actionGroup.logicAppReceivers().isEmpty()) { info.append("\n\tLogic App receivers: "); for (LogicAppReceiver er : actionGroup.logicAppReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tResource Id: ").append(er.resourceId()); info.append("\n\t\tCallback URL: ").append(er.callbackUrl()); info.append("\n\t\t==="); } } if (actionGroup.itsmReceivers() != null && !actionGroup.itsmReceivers().isEmpty()) { info.append("\n\tITSM receivers: "); for (ItsmReceiver er : actionGroup.itsmReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tWorkspace Id: ").append(er.workspaceId()); info.append("\n\t\tConnection Id: 
").append(er.connectionId()); info.append("\n\t\tRegion: ").append(er.region()); info.append("\n\t\tTicket Configuration: ").append(er.ticketConfiguration()); info.append("\n\t\t==="); } } System.out.println(info.toString()); } /** * Print activity log alert settings. * * @param activityLogAlert activity log instance */ public static void print(ActivityLogAlert activityLogAlert) { StringBuilder info = new StringBuilder("Activity Log Alert: ") .append("\n\tId: ").append(activityLogAlert.id()) .append("\n\tName: ").append(activityLogAlert.name()) .append("\n\tDescription: ").append(activityLogAlert.description()) .append("\n\tIs Enabled: ").append(activityLogAlert.enabled()); if (activityLogAlert.scopes() != null && !activityLogAlert.scopes().isEmpty()) { info.append("\n\tScopes: "); for (String er : activityLogAlert.scopes()) { info.append("\n\t\tId: ").append(er); } } if (activityLogAlert.actionGroupIds() != null && !activityLogAlert.actionGroupIds().isEmpty()) { info.append("\n\tAction Groups: "); for (String er : activityLogAlert.actionGroupIds()) { info.append("\n\t\tAction Group Id: ").append(er); } } if (activityLogAlert.equalsConditions() != null && !activityLogAlert.equalsConditions().isEmpty()) { info.append("\n\tAlert conditions (when all of is true): "); for (Map.Entry<String, String> er : activityLogAlert.equalsConditions().entrySet()) { info.append("\n\t\t'").append(er.getKey()).append("' equals '").append(er.getValue()).append("'"); } } System.out.println(info.toString()); } /** * Print metric alert settings. 
* * @param metricAlert metric alert instance */ public static void print(MetricAlert metricAlert) { StringBuilder info = new StringBuilder("Metric Alert: ") .append("\n\tId: ").append(metricAlert.id()) .append("\n\tName: ").append(metricAlert.name()) .append("\n\tDescription: ").append(metricAlert.description()) .append("\n\tIs Enabled: ").append(metricAlert.enabled()) .append("\n\tIs Auto Mitigated: ").append(metricAlert.autoMitigate()) .append("\n\tSeverity: ").append(metricAlert.severity()) .append("\n\tWindow Size: ").append(metricAlert.windowSize()) .append("\n\tEvaluation Frequency: ").append(metricAlert.evaluationFrequency()); if (metricAlert.scopes() != null && !metricAlert.scopes().isEmpty()) { info.append("\n\tScopes: "); for (String er : metricAlert.scopes()) { info.append("\n\t\tId: ").append(er); } } if (metricAlert.actionGroupIds() != null && !metricAlert.actionGroupIds().isEmpty()) { info.append("\n\tAction Groups: "); for (String er : metricAlert.actionGroupIds()) { info.append("\n\t\tAction Group Id: ").append(er); } } if (metricAlert.alertCriterias() != null && !metricAlert.alertCriterias().isEmpty()) { info.append("\n\tAlert conditions (when all of is true): "); for (Map.Entry<String, MetricAlertCondition> er : metricAlert.alertCriterias().entrySet()) { MetricAlertCondition alertCondition = er.getValue(); info.append("\n\t\tCondition name: ").append(er.getKey()) .append("\n\t\tSignal name: ").append(alertCondition.metricName()) .append("\n\t\tMetric Namespace: ").append(alertCondition.metricNamespace()) .append("\n\t\tOperator: ").append(alertCondition.condition()) .append("\n\t\tThreshold: ").append(alertCondition.threshold()) .append("\n\t\tTime Aggregation: ").append(alertCondition.timeAggregation()); if (alertCondition.dimensions() != null && !alertCondition.dimensions().isEmpty()) { for (MetricDimension dimon : alertCondition.dimensions()) { info.append("\n\t\tDimension Filter: ").append("Name [").append(dimon.name()).append("] operator 
[Include] values["); for (String vals : dimon.values()) { info.append(vals).append(", "); } info.append("]"); } } } } System.out.println(info.toString()); } /** * Print spring service settings. * * @param springService spring service instance */ public static void print(SpringService springService) { StringBuilder info = new StringBuilder("Spring Service: ") .append("\n\tId: ").append(springService.id()) .append("\n\tName: ").append(springService.name()) .append("\n\tResource Group: ").append(springService.resourceGroupName()) .append("\n\tRegion: ").append(springService.region()) .append("\n\tTags: ").append(springService.tags()); ConfigServerProperties serverProperties = springService.getServerProperties(); if (serverProperties != null && serverProperties.provisioningState() != null && serverProperties.provisioningState().equals(ConfigServerState.SUCCEEDED) && serverProperties.configServer() != null) { info.append("\n\tProperties: "); if (serverProperties.configServer().gitProperty() != null) { info.append("\n\t\tGit: ").append(serverProperties.configServer().gitProperty().uri()); } } if (springService.sku() != null) { info.append("\n\tSku: ") .append("\n\t\tName: ").append(springService.sku().name()) .append("\n\t\tTier: ").append(springService.sku().tier()) .append("\n\t\tCapacity: ").append(springService.sku().capacity()); } MonitoringSettingProperties monitoringSettingProperties = springService.getMonitoringSetting(); if (monitoringSettingProperties != null && monitoringSettingProperties.provisioningState() != null && monitoringSettingProperties.provisioningState().equals(MonitoringSettingState.SUCCEEDED)) { info.append("\n\tTrace: ") .append("\n\t\tEnabled: ").append(monitoringSettingProperties.traceEnabled()) .append("\n\t\tApp Insight Instrumentation Key: ").append(monitoringSettingProperties.appInsightsInstrumentationKey()); } System.out.println(info.toString()); } /** * Print spring app settings. 
* * @param springApp spring app instance */ public static void print(SpringApp springApp) { StringBuilder info = new StringBuilder("Spring Service: ") .append("\n\tId: ").append(springApp.id()) .append("\n\tName: ").append(springApp.name()) .append("\n\tPublic Endpoint: ").append(springApp.isPublic()) .append("\n\tUrl: ").append(springApp.url()) .append("\n\tHttps Only: ").append(springApp.isHttpsOnly()) .append("\n\tFully Qualified Domain Name: ").append(springApp.fqdn()) .append("\n\tActive Deployment Name: ").append(springApp.activeDeploymentName()); if (springApp.temporaryDisk() != null) { info.append("\n\tTemporary Disk:") .append("\n\t\tSize In GB: ").append(springApp.temporaryDisk().sizeInGB()) .append("\n\t\tMount Path: ").append(springApp.temporaryDisk().mountPath()); } if (springApp.persistentDisk() != null) { info.append("\n\tPersistent Disk:") .append("\n\t\tSize In GB: ").append(springApp.persistentDisk().sizeInGB()) .append("\n\t\tMount Path: ").append(springApp.persistentDisk().mountPath()); } if (springApp.identity() != null) { info.append("\n\tIdentity:") .append("\n\t\tType: ").append(springApp.identity().type()) .append("\n\t\tPrincipal Id: ").append(springApp.identity().principalId()) .append("\n\t\tTenant Id: ").append(springApp.identity().tenantId()); } System.out.println(info.toString()); } /** * Print private link resource. * * @param privateLinkResource the private link resource */ public static void print(PrivateLinkResource privateLinkResource) { StringBuilder info = new StringBuilder("Private Link Resource: ") .append("\n\tGroup ID: ").append(privateLinkResource.groupId()) .append("\n\tRequired Member Names: ").append(privateLinkResource.requiredMemberNames()) .append("\n\tRequired DNS Zone Names: ").append(privateLinkResource.requiredDnsZoneNames()); System.out.println(info); } /** * Print private endpoint. 
* * @param privateEndpoint the private endpoint */ public static void print(PrivateEndpoint privateEndpoint) { StringBuilder info = new StringBuilder("Private Endpoint: ") .append("\n\tId: ").append(privateEndpoint.id()) .append("\n\tName: ").append(privateEndpoint.name()); if (privateEndpoint.privateLinkServiceConnections() != null && !privateEndpoint.privateLinkServiceConnections().isEmpty()) { for (PrivateEndpoint.PrivateLinkServiceConnection connection : privateEndpoint.privateLinkServiceConnections().values()) { info .append("\n\t\tPrivate Link Service Connection Name: ").append(connection.name()) .append("\n\t\tPrivate Link Resource ID: ").append(connection.privateLinkResourceId()) .append("\n\t\tSub Resource Names: ").append(connection.subResourceNames()) .append("\n\t\tProvision Status: ").append(connection.state().status()); } } if (privateEndpoint.privateLinkServiceConnections() != null && !privateEndpoint.privateLinkServiceConnections().isEmpty()) { info.append("\n\tPrivate Link Service Connections:"); for (PrivateEndpoint.PrivateLinkServiceConnection connection : privateEndpoint.privateLinkServiceConnections().values()) { info .append("\n\t\tName: ").append(connection.name()) .append("\n\t\tPrivate Link Resource ID: ").append(connection.privateLinkResourceId()) .append("\n\t\tSub Resource Names: ").append(connection.subResourceNames()) .append("\n\t\tStatus: ").append(connection.state().status()); } } if (privateEndpoint.customDnsConfigurations() != null && !privateEndpoint.customDnsConfigurations().isEmpty()) { info.append("\n\tCustom DNS Configure:"); for (CustomDnsConfigPropertiesFormat customDns : privateEndpoint.customDnsConfigurations()) { info .append("\n\t\tFQDN: ").append(customDns.fqdn()) .append("\n\t\tIP Address: ").append(customDns.ipAddresses()); } } System.out.println(info); } /** * Sends a GET request to target URL. * <p> * Retry logic tuned for AppService. * The method does not handle 301 redirect. * * @param urlString the target URL. 
* @return Content of the HTTP response. */ public static String sendGetRequest(String urlString) { HttpRequest request = new HttpRequest(HttpMethod.GET, urlString); Mono<Response<String>> response = stringResponse(HTTP_PIPELINE.send(request) .flatMap(response1 -> { int code = response1.getStatusCode(); if (code == 200 || code == 400 || code == 404) { return Mono.just(response1); } else { return Mono.error(new HttpResponseException(response1)); } }) .retryWhen(Retry .fixedDelay(5, Duration.ofSeconds(30)) .filter(t -> { boolean retry = false; if (t instanceof TimeoutException) { retry = true; } else if (t instanceof HttpResponseException && ((HttpResponseException) t).getResponse().getStatusCode() == 503) { retry = true; } if (retry) { LOGGER.info("retry GET request to {}", urlString); } return retry; }))); Response<String> ret = response.block(); return ret == null ? null : ret.getValue(); } /** * Sends a POST request to target URL. * <p> * Retry logic tuned for AppService. * * @param urlString the target URL. * @param body the request body. * @return Content of the HTTP response. * */ public static String sendPostRequest(String urlString, String body) { try { HttpRequest request = new HttpRequest(HttpMethod.POST, urlString).setBody(body); Mono<Response<String>> response = stringResponse(HTTP_PIPELINE.send(request) .flatMap(response1 -> { int code = response1.getStatusCode(); if (code == 200 || code == 400 || code == 404) { return Mono.just(response1); } else { return Mono.error(new HttpResponseException(response1)); } }) .retryWhen(Retry .fixedDelay(5, Duration.ofSeconds(30)) .filter(t -> { boolean retry = false; if (t instanceof TimeoutException) { retry = true; } if (retry) { LOGGER.info("retry POST request to {}", urlString); } return retry; }))); Response<String> ret = response.block(); return ret == null ? 
null : ret.getValue(); } catch (Exception e) { LOGGER.logThrowableAsError(e); return null; } } private static Mono<Response<String>> stringResponse(Mono<HttpResponse> responseMono) { return responseMono.flatMap(response -> response.getBodyAsString() .map(str -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), str))); } private static final HttpPipeline HTTP_PIPELINE = new HttpPipelineBuilder() .policies( new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC)), new RetryPolicy("Retry-After", ChronoUnit.SECONDS)) .build(); /** * Get the size of the iterable. * * @param iterable iterable to count size * @param <T> generic type parameter of the iterable * @return size of the iterable */ public static <T> int getSize(Iterable<T> iterable) { int res = 0; Iterator<T> iterator = iterable.iterator(); while (iterator.hasNext()) { iterator.next(); res++; } return res; } }
for (String notAction : permission.dataActions()) {
public static String getSecondaryServicePrincipalClientID(String envSecondaryServicePrincipal) throws IOException { String content = new String(Files.readAllBytes(new File(envSecondaryServicePrincipal).toPath()), StandardCharsets.UTF_8).trim(); HashMap<String, String> auth = new HashMap<>(); if (content.startsWith("{")) { auth = new JacksonAdapter().deserialize(content, auth.getClass(), SerializerEncoding.JSON); return auth.get("clientId"); } else { Properties authSettings = new Properties(); try (FileInputStream credentialsFileStream = new FileInputStream(new File(envSecondaryServicePrincipal))) { authSettings.load(credentialsFileStream); } return authSettings.getProperty("client"); } } /** * Retrieve the secondary service principal secret. * * @param envSecondaryServicePrincipal an Azure Container Registry * @return a service principal secret * @throws IOException exception */ public static String getSecondaryServicePrincipalSecret(String envSecondaryServicePrincipal) throws IOException { String content = new String(Files.readAllBytes(new File(envSecondaryServicePrincipal).toPath()), StandardCharsets.UTF_8).trim(); HashMap<String, String> auth = new HashMap<>(); if (content.startsWith("{")) { auth = new JacksonAdapter().deserialize(content, auth.getClass(), SerializerEncoding.JSON); return auth.get("clientSecret"); } else { Properties authSettings = new Properties(); try (FileInputStream credentialsFileStream = new FileInputStream(new File(envSecondaryServicePrincipal))) { authSettings.load(credentialsFileStream); } return authSettings.getProperty("key"); } } /** * This method creates a certificate for given password. 
* * @param certPath location of certificate file * @param pfxPath location of pfx file * @param alias User alias * @param password alias password * @param cnName domain name * @param dnsName dns name in subject alternate name * @throws IOException IO Exception */ public static void createCertificate(String certPath, String pfxPath, String alias, String password, String cnName, String dnsName) throws IOException { if (new File(pfxPath).exists()) { return; } String validityInDays = "3650"; String keyAlg = "RSA"; String sigAlg = "SHA1withRSA"; String keySize = "2048"; String storeType = "pkcs12"; String command = "keytool"; String jdkPath = System.getProperty("java.home"); if (jdkPath != null && !jdkPath.isEmpty()) { jdkPath = jdkPath.concat("\\bin"); if (new File(jdkPath).isDirectory()) { command = String.format("%s%s%s", jdkPath, File.separator, command); } } else { return; } String[] commandArgs = {command, "-genkey", "-alias", alias, "-keystore", pfxPath, "-storepass", password, "-validity", validityInDays, "-keyalg", keyAlg, "-sigalg", sigAlg, "-keysize", keySize, "-storetype", storeType, "-dname", "CN=" + cnName, "-ext", "EKU=1.3.6.1.5.5.7.3.1"}; if (dnsName != null) { List<String> args = new ArrayList<>(Arrays.asList(commandArgs)); args.add("-ext"); args.add("san=dns:" + dnsName); commandArgs = args.toArray(new String[0]); } Utils.cmdInvocation(commandArgs, true); File pfxFile = new File(pfxPath); if (pfxFile.exists()) { String[] certCommandArgs = {command, "-export", "-alias", alias, "-storetype", storeType, "-keystore", pfxPath, "-storepass", password, "-rfc", "-file", certPath}; Utils.cmdInvocation(certCommandArgs, true); File cerFile = new File(pfxPath); if (!cerFile.exists()) { throw new IOException( "Error occurred while creating certificate" + String.join(" ", certCommandArgs)); } } else { throw new IOException("Error occurred while creating certificates" + String.join(" ", commandArgs)); } } /** * This method is used for invoking native commands. 
* * @param command :- command to invoke. * @param ignoreErrorStream : Boolean which controls whether to throw exception or not * based on error stream. * @return result :- depending on the method invocation. * @throws IOException exceptions thrown from the execution */ public static String cmdInvocation(String[] command, boolean ignoreErrorStream) throws IOException { String result = ""; String error = ""; Process process = new ProcessBuilder(command).start(); try ( InputStream inputStream = process.getInputStream(); InputStream errorStream = process.getErrorStream(); BufferedReader br = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8)); BufferedReader ebr = new BufferedReader(new InputStreamReader(errorStream, StandardCharsets.UTF_8)); ) { result = br.readLine(); process.waitFor(); error = ebr.readLine(); if (error != null && (!"".equals(error))) { if (!ignoreErrorStream) { throw new IOException(error, null); } } } catch (Exception e) { throw new RuntimeException("Exception occurred while invoking command", e); } return result; } /** * Prints information for passed SQL Server. * * @param sqlServer sqlServer to be printed */ public static void print(SqlServer sqlServer) { StringBuilder builder = new StringBuilder().append("Sql Server: ").append(sqlServer.id()) .append("Name: ").append(sqlServer.name()) .append("\n\tResource group: ").append(sqlServer.resourceGroupName()) .append("\n\tRegion: ").append(sqlServer.region()) .append("\n\tSqlServer version: ").append(sqlServer.version()) .append("\n\tFully qualified name for Sql Server: ").append(sqlServer.fullyQualifiedDomainName()); System.out.println(builder.toString()); } /** * Prints information for the passed SQL Database. 
* * @param database database to be printed */ public static void print(SqlDatabase database) { StringBuilder builder = new StringBuilder().append("Sql Database: ").append(database.id()) .append("Name: ").append(database.name()) .append("\n\tResource group: ").append(database.resourceGroupName()) .append("\n\tRegion: ").append(database.region()) .append("\n\tSqlServer Name: ").append(database.sqlServerName()) .append("\n\tEdition of SQL database: ").append(database.edition()) .append("\n\tCollation of SQL database: ").append(database.collation()) .append("\n\tCreation date of SQL database: ").append(database.creationDate()) .append("\n\tIs data warehouse: ").append(database.isDataWarehouse()) .append("\n\tRequested service objective of SQL database: ").append(database.requestedServiceObjectiveName()) .append("\n\tName of current service objective of SQL database: ").append(database.currentServiceObjectiveName()) .append("\n\tMax size bytes of SQL database: ").append(database.maxSizeBytes()) .append("\n\tDefault secondary location of SQL database: ").append(database.defaultSecondaryLocation()); System.out.println(builder.toString()); } /** * Prints information for the passed firewall rule. * * @param firewallRule firewall rule to be printed. */ public static void print(SqlFirewallRule firewallRule) { StringBuilder builder = new StringBuilder().append("Sql firewall rule: ").append(firewallRule.id()) .append("Name: ").append(firewallRule.name()) .append("\n\tResource group: ").append(firewallRule.resourceGroupName()) .append("\n\tRegion: ").append(firewallRule.region()) .append("\n\tSqlServer Name: ").append(firewallRule.sqlServerName()) .append("\n\tStart IP Address of the firewall rule: ").append(firewallRule.startIpAddress()) .append("\n\tEnd IP Address of the firewall rule: ").append(firewallRule.endIpAddress()); System.out.println(builder.toString()); } /** * Prints information for the passed virtual network rule. 
* * @param virtualNetworkRule virtual network rule to be printed. */ public static void print(SqlVirtualNetworkRule virtualNetworkRule) { StringBuilder builder = new StringBuilder().append("SQL virtual network rule: ").append(virtualNetworkRule.id()) .append("Name: ").append(virtualNetworkRule.name()) .append("\n\tResource group: ").append(virtualNetworkRule.resourceGroupName()) .append("\n\tSqlServer Name: ").append(virtualNetworkRule.sqlServerName()) .append("\n\tSubnet ID: ").append(virtualNetworkRule.subnetId()) .append("\n\tState: ").append(virtualNetworkRule.state()); System.out.println(builder.toString()); } /** * Prints information for the passed SQL subscription usage metric. * * @param subscriptionUsageMetric metric to be printed. */ public static void print(SqlSubscriptionUsageMetric subscriptionUsageMetric) { StringBuilder builder = new StringBuilder().append("SQL Subscription Usage Metric: ").append(subscriptionUsageMetric.id()) .append("Name: ").append(subscriptionUsageMetric.name()) .append("\n\tDisplay Name: ").append(subscriptionUsageMetric.displayName()) .append("\n\tCurrent Value: ").append(subscriptionUsageMetric.currentValue()) .append("\n\tLimit: ").append(subscriptionUsageMetric.limit()) .append("\n\tUnit: ").append(subscriptionUsageMetric.unit()) .append("\n\tType: ").append(subscriptionUsageMetric.type()); System.out.println(builder.toString()); } /** * Prints information for the passed SQL database usage metric. * * @param dbUsageMetric metric to be printed. 
*/ public static void print(SqlDatabaseUsageMetric dbUsageMetric) { StringBuilder builder = new StringBuilder().append("SQL Database Usage Metric") .append("Name: ").append(dbUsageMetric.name()) .append("\n\tDisplay Name: ").append(dbUsageMetric.displayName()) .append("\n\tCurrent Value: ").append(dbUsageMetric.currentValue()) .append("\n\tLimit: ").append(dbUsageMetric.limit()) .append("\n\tUnit: ").append(dbUsageMetric.unit()); System.out.println(builder.toString()); } /** * Prints information for the passed Failover Group. * * @param failoverGroup the SQL Failover Group to be printed. */ public static void print(SqlFailoverGroup failoverGroup) { StringBuilder builder = new StringBuilder().append("SQL Failover Group: ").append(failoverGroup.id()) .append("Name: ").append(failoverGroup.name()) .append("\n\tResource group: ").append(failoverGroup.resourceGroupName()) .append("\n\tSqlServer Name: ").append(failoverGroup.sqlServerName()) .append("\n\tRead-write endpoint policy: ").append(failoverGroup.readWriteEndpointPolicy()) .append("\n\tData loss grace period: ").append(failoverGroup.readWriteEndpointDataLossGracePeriodMinutes()) .append("\n\tRead-only endpoint policy: ").append(failoverGroup.readOnlyEndpointPolicy()) .append("\n\tReplication state: ").append(failoverGroup.replicationState()) .append("\n\tReplication role: ").append(failoverGroup.replicationRole()); builder.append("\n\tPartner Servers: "); for (PartnerInfo item : failoverGroup.partnerServers()) { builder .append("\n\t\tId: ").append(item.id()) .append("\n\t\tLocation: ").append(item.location()) .append("\n\t\tReplication role: ").append(item.replicationRole()); } builder.append("\n\tDatabases: "); for (String databaseId : failoverGroup.databases()) { builder.append("\n\t\tID: ").append(databaseId); } System.out.println(builder.toString()); } /** * Prints information for the passed SQL server key. * * @param serverKey virtual network rule to be printed. 
*/ public static void print(SqlServerKey serverKey) { StringBuilder builder = new StringBuilder().append("SQL server key: ").append(serverKey.id()) .append("Name: ").append(serverKey.name()) .append("\n\tResource group: ").append(serverKey.resourceGroupName()) .append("\n\tSqlServer Name: ").append(serverKey.sqlServerName()) .append("\n\tRegion: ").append(serverKey.region() != null ? serverKey.region().name() : "") .append("\n\tServer Key Type: ").append(serverKey.serverKeyType()) .append("\n\tServer Key URI: ").append(serverKey.uri()) .append("\n\tServer Key Thumbprint: ").append(serverKey.thumbprint()) .append("\n\tServer Key Creation Date: ").append(serverKey.creationDate() != null ? serverKey.creationDate().toString() : ""); System.out.println(builder.toString()); } /** * Prints information of the elastic pool passed in. * * @param elasticPool elastic pool to be printed */ public static void print(SqlElasticPool elasticPool) { StringBuilder builder = new StringBuilder().append("Sql elastic pool: ").append(elasticPool.id()) .append("Name: ").append(elasticPool.name()) .append("\n\tResource group: ").append(elasticPool.resourceGroupName()) .append("\n\tRegion: ").append(elasticPool.region()) .append("\n\tSqlServer Name: ").append(elasticPool.sqlServerName()) .append("\n\tEdition of elastic pool: ").append(elasticPool.edition()) .append("\n\tTotal number of DTUs in the elastic pool: ").append(elasticPool.dtu()) .append("\n\tMaximum DTUs a database can get in elastic pool: ").append(elasticPool.databaseDtuMax()) .append("\n\tMinimum DTUs a database is guaranteed in elastic pool: ").append(elasticPool.databaseDtuMin()) .append("\n\tCreation date for the elastic pool: ").append(elasticPool.creationDate()) .append("\n\tState of the elastic pool: ").append(elasticPool.state()) .append("\n\tStorage capacity in MBs for the elastic pool: ").append(elasticPool.storageCapacity()); System.out.println(builder.toString()); } /** * Prints information of the elastic pool 
activity. * * @param elasticPoolActivity elastic pool activity to be printed */ public static void print(ElasticPoolActivity elasticPoolActivity) { StringBuilder builder = new StringBuilder().append("Sql elastic pool activity: ").append(elasticPoolActivity.id()) .append("Name: ").append(elasticPoolActivity.name()) .append("\n\tResource group: ").append(elasticPoolActivity.resourceGroupName()) .append("\n\tState: ").append(elasticPoolActivity.state()) .append("\n\tElastic pool name: ").append(elasticPoolActivity.elasticPoolName()) .append("\n\tStart time of activity: ").append(elasticPoolActivity.startTime()) .append("\n\tEnd time of activity: ").append(elasticPoolActivity.endTime()) .append("\n\tError code of activity: ").append(elasticPoolActivity.errorCode()) .append("\n\tError message of activity: ").append(elasticPoolActivity.errorMessage()) .append("\n\tError severity of activity: ").append(elasticPoolActivity.errorSeverity()) .append("\n\tOperation: ").append(elasticPoolActivity.operation()) .append("\n\tCompleted percentage of activity: ").append(elasticPoolActivity.percentComplete()); System.out.println(builder.toString()); } /** * Print an application gateway. * * @param resource an application gateway */ public static void print(ApplicationGateway resource) { StringBuilder info = new StringBuilder(); info.append("Application gateway: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tSKU: ").append(resource.sku().toString()) .append("\n\tOperational state: ").append(resource.operationalState()) .append("\n\tInternet-facing? ").append(resource.isPublic()) .append("\n\tInternal? 
").append(resource.isPrivate()) .append("\n\tDefault private IP address: ").append(resource.privateIpAddress()) .append("\n\tPrivate IP address allocation method: ").append(resource.privateIpAllocationMethod()) .append("\n\tDisabled SSL protocols: ").append(resource.disabledSslProtocols().toString()); Map<String, ApplicationGatewayIpConfiguration> ipConfigs = resource.ipConfigurations(); info.append("\n\tIP configurations: ").append(ipConfigs.size()); for (ApplicationGatewayIpConfiguration ipConfig : ipConfigs.values()) { info.append("\n\t\tName: ").append(ipConfig.name()) .append("\n\t\t\tNetwork id: ").append(ipConfig.networkId()) .append("\n\t\t\tSubnet name: ").append(ipConfig.subnetName()); } Map<String, ApplicationGatewayFrontend> frontends = resource.frontends(); info.append("\n\tFrontends: ").append(frontends.size()); for (ApplicationGatewayFrontend frontend : frontends.values()) { info.append("\n\t\tName: ").append(frontend.name()) .append("\n\t\t\tPublic? ").append(frontend.isPublic()); if (frontend.isPublic()) { info.append("\n\t\t\tPublic IP address ID: ").append(frontend.publicIpAddressId()); } if (frontend.isPrivate()) { info.append("\n\t\t\tPrivate IP address: ").append(frontend.privateIpAddress()) .append("\n\t\t\tPrivate IP allocation method: ").append(frontend.privateIpAllocationMethod()) .append("\n\t\t\tSubnet name: ").append(frontend.subnetName()) .append("\n\t\t\tVirtual network ID: ").append(frontend.networkId()); } } Map<String, ApplicationGatewayBackend> backends = resource.backends(); info.append("\n\tBackends: ").append(backends.size()); for (ApplicationGatewayBackend backend : backends.values()) { info.append("\n\t\tName: ").append(backend.name()) .append("\n\t\t\tAssociated NIC IP configuration IDs: ").append(backend.backendNicIPConfigurationNames().keySet()); Collection<ApplicationGatewayBackendAddress> addresses = backend.addresses(); info.append("\n\t\t\tAddresses: ").append(addresses.size()); for (ApplicationGatewayBackendAddress 
address : addresses) { info.append("\n\t\t\t\tFQDN: ").append(address.fqdn()) .append("\n\t\t\t\tIP: ").append(address.ipAddress()); } } Map<String, ApplicationGatewayBackendHttpConfiguration> httpConfigs = resource.backendHttpConfigurations(); info.append("\n\tHTTP Configurations: ").append(httpConfigs.size()); for (ApplicationGatewayBackendHttpConfiguration httpConfig : httpConfigs.values()) { info.append("\n\t\tName: ").append(httpConfig.name()) .append("\n\t\t\tCookie based affinity: ").append(httpConfig.cookieBasedAffinity()) .append("\n\t\t\tPort: ").append(httpConfig.port()) .append("\n\t\t\tRequest timeout in seconds: ").append(httpConfig.requestTimeout()) .append("\n\t\t\tProtocol: ").append(httpConfig.protocol()) .append("\n\t\tHost header: ").append(httpConfig.hostHeader()) .append("\n\t\tHost header comes from backend? ").append(httpConfig.isHostHeaderFromBackend()) .append("\n\t\tConnection draining timeout in seconds: ").append(httpConfig.connectionDrainingTimeoutInSeconds()) .append("\n\t\tAffinity cookie name: ").append(httpConfig.affinityCookieName()) .append("\n\t\tPath: ").append(httpConfig.path()); ApplicationGatewayProbe probe = httpConfig.probe(); if (probe != null) { info.append("\n\t\tProbe: " + probe.name()); } info.append("\n\t\tIs probe enabled? 
").append(httpConfig.isProbeEnabled()); } Map<String, ApplicationGatewaySslCertificate> sslCerts = resource.sslCertificates(); info.append("\n\tSSL certificates: ").append(sslCerts.size()); for (ApplicationGatewaySslCertificate cert : sslCerts.values()) { info.append("\n\t\tName: ").append(cert.name()) .append("\n\t\t\tCert data: ").append(cert.publicData()); } Map<String, ApplicationGatewayRedirectConfiguration> redirects = resource.redirectConfigurations(); info.append("\n\tRedirect configurations: ").append(redirects.size()); for (ApplicationGatewayRedirectConfiguration redirect : redirects.values()) { info.append("\n\t\tName: ").append(redirect.name()) .append("\n\t\tTarget URL: ").append(redirect.type()) .append("\n\t\tTarget URL: ").append(redirect.targetUrl()) .append("\n\t\tTarget listener: ").append(redirect.targetListener() != null ? redirect.targetListener().name() : null) .append("\n\t\tIs path included? ").append(redirect.isPathIncluded()) .append("\n\t\tIs query string included? ").append(redirect.isQueryStringIncluded()) .append("\n\t\tReferencing request routing rules: ").append(redirect.requestRoutingRules().values()); } Map<String, ApplicationGatewayListener> listeners = resource.listeners(); info.append("\n\tHTTP listeners: ").append(listeners.size()); for (ApplicationGatewayListener listener : listeners.values()) { info.append("\n\t\tName: ").append(listener.name()) .append("\n\t\t\tHost name: ").append(listener.hostname()) .append("\n\t\t\tServer name indication required? 
").append(listener.requiresServerNameIndication()) .append("\n\t\t\tAssociated frontend name: ").append(listener.frontend().name()) .append("\n\t\t\tFrontend port name: ").append(listener.frontendPortName()) .append("\n\t\t\tFrontend port number: ").append(listener.frontendPortNumber()) .append("\n\t\t\tProtocol: ").append(listener.protocol().toString()); if (listener.sslCertificate() != null) { info.append("\n\t\t\tAssociated SSL certificate: ").append(listener.sslCertificate().name()); } } Map<String, ApplicationGatewayProbe> probes = resource.probes(); info.append("\n\tProbes: ").append(probes.size()); for (ApplicationGatewayProbe probe : probes.values()) { info.append("\n\t\tName: ").append(probe.name()) .append("\n\t\tProtocol:").append(probe.protocol().toString()) .append("\n\t\tInterval in seconds: ").append(probe.timeBetweenProbesInSeconds()) .append("\n\t\tRetries: ").append(probe.retriesBeforeUnhealthy()) .append("\n\t\tTimeout: ").append(probe.timeoutInSeconds()) .append("\n\t\tHost: ").append(probe.host()) .append("\n\t\tHealthy HTTP response status code ranges: ").append(probe.healthyHttpResponseStatusCodeRanges()) .append("\n\t\tHealthy HTTP response body contents: ").append(probe.healthyHttpResponseBodyContents()); } Map<String, ApplicationGatewayRequestRoutingRule> rules = resource.requestRoutingRules(); info.append("\n\tRequest routing rules: ").append(rules.size()); for (ApplicationGatewayRequestRoutingRule rule : rules.values()) { info.append("\n\t\tName: ").append(rule.name()) .append("\n\t\tType: ").append(rule.ruleType()) .append("\n\t\tPublic IP address ID: ").append(rule.publicIpAddressId()) .append("\n\t\tHost name: ").append(rule.hostname()) .append("\n\t\tServer name indication required? 
").append(rule.requiresServerNameIndication()) .append("\n\t\tFrontend port: ").append(rule.frontendPort()) .append("\n\t\tFrontend protocol: ").append(rule.frontendProtocol().toString()) .append("\n\t\tBackend port: ").append(rule.backendPort()) .append("\n\t\tCookie based affinity enabled? ").append(rule.cookieBasedAffinity()) .append("\n\t\tRedirect configuration: ").append(rule.redirectConfiguration() != null ? rule.redirectConfiguration().name() : "(none)"); Collection<ApplicationGatewayBackendAddress> addresses = rule.backendAddresses(); info.append("\n\t\t\tBackend addresses: ").append(addresses.size()); for (ApplicationGatewayBackendAddress address : addresses) { info.append("\n\t\t\t\t") .append(address.fqdn()) .append(" [").append(address.ipAddress()).append("]"); } info.append("\n\t\t\tSSL certificate name: "); ApplicationGatewaySslCertificate cert = rule.sslCertificate(); if (cert == null) { info.append("(None)"); } else { info.append(cert.name()); } info.append("\n\t\t\tAssociated backend address pool: "); ApplicationGatewayBackend backend = rule.backend(); if (backend == null) { info.append("(None)"); } else { info.append(backend.name()); } info.append("\n\t\t\tAssociated backend HTTP settings configuration: "); ApplicationGatewayBackendHttpConfiguration config = rule.backendHttpConfiguration(); if (config == null) { info.append("(None)"); } else { info.append(config.name()); } info.append("\n\t\t\tAssociated frontend listener: "); ApplicationGatewayListener listener = rule.listener(); if (listener == null) { info.append("(None)"); } else { info.append(config.name()); } } System.out.println(info.toString()); } /** * Prints information of a virtual machine custom image. 
* * @param image the image */ public static void print(VirtualMachineCustomImage image) { StringBuilder builder = new StringBuilder().append("Virtual machine custom image: ").append(image.id()) .append("Name: ").append(image.name()) .append("\n\tResource group: ").append(image.resourceGroupName()) .append("\n\tCreated from virtual machine: ").append(image.sourceVirtualMachineId()); builder.append("\n\tOS disk image: ") .append("\n\t\tOperating system: ").append(image.osDiskImage().osType()) .append("\n\t\tOperating system state: ").append(image.osDiskImage().osState()) .append("\n\t\tCaching: ").append(image.osDiskImage().caching()) .append("\n\t\tSize (GB): ").append(image.osDiskImage().diskSizeGB()); if (image.isCreatedFromVirtualMachine()) { builder.append("\n\t\tSource virtual machine: ").append(image.sourceVirtualMachineId()); } if (image.osDiskImage().managedDisk() != null) { builder.append("\n\t\tSource managed disk: ").append(image.osDiskImage().managedDisk().id()); } if (image.osDiskImage().snapshot() != null) { builder.append("\n\t\tSource snapshot: ").append(image.osDiskImage().snapshot().id()); } if (image.osDiskImage().blobUri() != null) { builder.append("\n\t\tSource un-managed vhd: ").append(image.osDiskImage().blobUri()); } if (image.dataDiskImages() != null) { for (ImageDataDisk diskImage : image.dataDiskImages().values()) { builder.append("\n\tDisk Image (Lun) .append("\n\t\tCaching: ").append(diskImage.caching()) .append("\n\t\tSize (GB): ").append(diskImage.diskSizeGB()); if (image.isCreatedFromVirtualMachine()) { builder.append("\n\t\tSource virtual machine: ").append(image.sourceVirtualMachineId()); } if (diskImage.managedDisk() != null) { builder.append("\n\t\tSource managed disk: ").append(diskImage.managedDisk().id()); } if (diskImage.snapshot() != null) { builder.append("\n\t\tSource snapshot: ").append(diskImage.snapshot().id()); } if (diskImage.blobUri() != null) { builder.append("\n\t\tSource un-managed vhd: 
").append(diskImage.blobUri()); } } } System.out.println(builder.toString()); } /** * Uploads a file to an Azure app service for Web App. * * @param profile the publishing profile for the app service. * @param fileName the name of the file on server * @param file the local file */ public static void uploadFileViaFtp(PublishingProfile profile, String fileName, InputStream file) { String path = "./site/wwwroot/webapps"; uploadFileViaFtp(profile, fileName, file, path); } /** * Uploads a file to an Azure app service for Function App. * * @param profile the publishing profile for the app service. * @param fileName the name of the file on server * @param file the local file */ public static void uploadFileForFunctionViaFtp(PublishingProfile profile, String fileName, InputStream file) { String path = "./site/wwwroot"; uploadFileViaFtp(profile, fileName, file, path); } private static void uploadFileViaFtp(PublishingProfile profile, String fileName, InputStream file, String path) { FTPClient ftpClient = new FTPClient(); String[] ftpUrlSegments = profile.ftpUrl().split("/", 2); String server = ftpUrlSegments[0]; if (fileName.contains("/")) { int lastslash = fileName.lastIndexOf('/'); path = path + "/" + fileName.substring(0, lastslash); fileName = fileName.substring(lastslash + 1); } try { ftpClient.connect(server); ftpClient.enterLocalPassiveMode(); ftpClient.login(profile.ftpUsername(), profile.ftpPassword()); ftpClient.setFileType(FTP.BINARY_FILE_TYPE); for (String segment : path.split("/")) { if (!ftpClient.changeWorkingDirectory(segment)) { ftpClient.makeDirectory(segment); ftpClient.changeWorkingDirectory(segment); } } ftpClient.storeFile(fileName, file); ftpClient.disconnect(); } catch (IOException e) { e.printStackTrace(); } } /** * Print service bus namespace info. 
* * @param serviceBusNamespace a service bus namespace */ public static void print(ServiceBusNamespace serviceBusNamespace) { StringBuilder builder = new StringBuilder() .append("Service bus Namespace: ").append(serviceBusNamespace.id()) .append("\n\tName: ").append(serviceBusNamespace.name()) .append("\n\tRegion: ").append(serviceBusNamespace.regionName()) .append("\n\tResourceGroupName: ").append(serviceBusNamespace.resourceGroupName()) .append("\n\tCreatedAt: ").append(serviceBusNamespace.createdAt()) .append("\n\tUpdatedAt: ").append(serviceBusNamespace.updatedAt()) .append("\n\tDnsLabel: ").append(serviceBusNamespace.dnsLabel()) .append("\n\tFQDN: ").append(serviceBusNamespace.fqdn()) .append("\n\tSku: ") .append("\n\t\tCapacity: ").append(serviceBusNamespace.sku().capacity()) .append("\n\t\tSkuName: ").append(serviceBusNamespace.sku().name()) .append("\n\t\tTier: ").append(serviceBusNamespace.sku().tier()); System.out.println(builder.toString()); } /** * Print service bus queue info. 
* * @param queue a service bus queue */ public static void print(Queue queue) { StringBuilder builder = new StringBuilder() .append("Service bus Queue: ").append(queue.id()) .append("\n\tName: ").append(queue.name()) .append("\n\tResourceGroupName: ").append(queue.resourceGroupName()) .append("\n\tCreatedAt: ").append(queue.createdAt()) .append("\n\tUpdatedAt: ").append(queue.updatedAt()) .append("\n\tAccessedAt: ").append(queue.accessedAt()) .append("\n\tActiveMessageCount: ").append(queue.activeMessageCount()) .append("\n\tCurrentSizeInBytes: ").append(queue.currentSizeInBytes()) .append("\n\tDeadLetterMessageCount: ").append(queue.deadLetterMessageCount()) .append("\n\tDefaultMessageTtlDuration: ").append(queue.defaultMessageTtlDuration()) .append("\n\tDuplicateMessageDetectionHistoryDuration: ").append(queue.duplicateMessageDetectionHistoryDuration()) .append("\n\tIsBatchedOperationsEnabled: ").append(queue.isBatchedOperationsEnabled()) .append("\n\tIsDeadLetteringEnabledForExpiredMessages: ").append(queue.isDeadLetteringEnabledForExpiredMessages()) .append("\n\tIsDuplicateDetectionEnabled: ").append(queue.isDuplicateDetectionEnabled()) .append("\n\tIsExpressEnabled: ").append(queue.isExpressEnabled()) .append("\n\tIsPartitioningEnabled: ").append(queue.isPartitioningEnabled()) .append("\n\tIsSessionEnabled: ").append(queue.isSessionEnabled()) .append("\n\tDeleteOnIdleDurationInMinutes: ").append(queue.deleteOnIdleDurationInMinutes()) .append("\n\tMaxDeliveryCountBeforeDeadLetteringMessage: ").append(queue.maxDeliveryCountBeforeDeadLetteringMessage()) .append("\n\tMaxSizeInMB: ").append(queue.maxSizeInMB()) .append("\n\tMessageCount: ").append(queue.messageCount()) .append("\n\tScheduledMessageCount: ").append(queue.scheduledMessageCount()) .append("\n\tStatus: ").append(queue.status()) .append("\n\tTransferMessageCount: ").append(queue.transferMessageCount()) .append("\n\tLockDurationInSeconds: ").append(queue.lockDurationInSeconds()) 
.append("\n\tTransferDeadLetterMessageCount: ").append(queue.transferDeadLetterMessageCount()); System.out.println(builder.toString()); } /** * Print service bus queue authorization keys info. * * @param queueAuthorizationRule a service bus queue authorization keys */ public static void print(QueueAuthorizationRule queueAuthorizationRule) { StringBuilder builder = new StringBuilder() .append("Service bus queue authorization rule: ").append(queueAuthorizationRule.id()) .append("\n\tName: ").append(queueAuthorizationRule.name()) .append("\n\tResourceGroupName: ").append(queueAuthorizationRule.resourceGroupName()) .append("\n\tNamespace Name: ").append(queueAuthorizationRule.namespaceName()) .append("\n\tQueue Name: ").append(queueAuthorizationRule.queueName()); List<com.azure.resourcemanager.servicebus.models.AccessRights> rights = queueAuthorizationRule.rights(); builder.append("\n\tNumber of access rights in queue: ").append(rights.size()); for (com.azure.resourcemanager.servicebus.models.AccessRights right : rights) { builder.append("\n\t\tAccessRight: ") .append("\n\t\t\tName :").append(right.name()); } System.out.println(builder.toString()); } /** * Print service bus namespace authorization keys info. * * @param keys a service bus namespace authorization keys */ public static void print(AuthorizationKeys keys) { StringBuilder builder = new StringBuilder() .append("Authorization keys: ") .append("\n\tPrimaryKey: ").append(keys.primaryKey()) .append("\n\tPrimaryConnectionString: ").append(keys.primaryConnectionString()) .append("\n\tSecondaryKey: ").append(keys.secondaryKey()) .append("\n\tSecondaryConnectionString: ").append(keys.secondaryConnectionString()); System.out.println(builder.toString()); } /** * Print service bus namespace authorization rule info. 
* * @param namespaceAuthorizationRule a service bus namespace authorization rule */ public static void print(NamespaceAuthorizationRule namespaceAuthorizationRule) { StringBuilder builder = new StringBuilder() .append("Service bus queue authorization rule: ").append(namespaceAuthorizationRule.id()) .append("\n\tName: ").append(namespaceAuthorizationRule.name()) .append("\n\tResourceGroupName: ").append(namespaceAuthorizationRule.resourceGroupName()) .append("\n\tNamespace Name: ").append(namespaceAuthorizationRule.namespaceName()); List<com.azure.resourcemanager.servicebus.models.AccessRights> rights = namespaceAuthorizationRule.rights(); builder.append("\n\tNumber of access rights in queue: ").append(rights.size()); for (com.azure.resourcemanager.servicebus.models.AccessRights right : rights) { builder.append("\n\t\tAccessRight: ") .append("\n\t\t\tName :").append(right.name()); } System.out.println(builder.toString()); } /** * Print service bus topic info. * * @param topic a service bus topic */ public static void print(Topic topic) { StringBuilder builder = new StringBuilder() .append("Service bus topic: ").append(topic.id()) .append("\n\tName: ").append(topic.name()) .append("\n\tResourceGroupName: ").append(topic.resourceGroupName()) .append("\n\tCreatedAt: ").append(topic.createdAt()) .append("\n\tUpdatedAt: ").append(topic.updatedAt()) .append("\n\tAccessedAt: ").append(topic.accessedAt()) .append("\n\tActiveMessageCount: ").append(topic.activeMessageCount()) .append("\n\tCurrentSizeInBytes: ").append(topic.currentSizeInBytes()) .append("\n\tDeadLetterMessageCount: ").append(topic.deadLetterMessageCount()) .append("\n\tDefaultMessageTtlDuration: ").append(topic.defaultMessageTtlDuration()) .append("\n\tDuplicateMessageDetectionHistoryDuration: ").append(topic.duplicateMessageDetectionHistoryDuration()) .append("\n\tIsBatchedOperationsEnabled: ").append(topic.isBatchedOperationsEnabled()) .append("\n\tIsDuplicateDetectionEnabled: 
").append(topic.isDuplicateDetectionEnabled()) .append("\n\tIsExpressEnabled: ").append(topic.isExpressEnabled()) .append("\n\tIsPartitioningEnabled: ").append(topic.isPartitioningEnabled()) .append("\n\tDeleteOnIdleDurationInMinutes: ").append(topic.deleteOnIdleDurationInMinutes()) .append("\n\tMaxSizeInMB: ").append(topic.maxSizeInMB()) .append("\n\tScheduledMessageCount: ").append(topic.scheduledMessageCount()) .append("\n\tStatus: ").append(topic.status()) .append("\n\tTransferMessageCount: ").append(topic.transferMessageCount()) .append("\n\tSubscriptionCount: ").append(topic.subscriptionCount()) .append("\n\tTransferDeadLetterMessageCount: ").append(topic.transferDeadLetterMessageCount()); System.out.println(builder.toString()); } /** * Print service bus subscription info. * * @param serviceBusSubscription a service bus subscription */ public static void print(ServiceBusSubscription serviceBusSubscription) { StringBuilder builder = new StringBuilder() .append("Service bus subscription: ").append(serviceBusSubscription.id()) .append("\n\tName: ").append(serviceBusSubscription.name()) .append("\n\tResourceGroupName: ").append(serviceBusSubscription.resourceGroupName()) .append("\n\tCreatedAt: ").append(serviceBusSubscription.createdAt()) .append("\n\tUpdatedAt: ").append(serviceBusSubscription.updatedAt()) .append("\n\tAccessedAt: ").append(serviceBusSubscription.accessedAt()) .append("\n\tActiveMessageCount: ").append(serviceBusSubscription.activeMessageCount()) .append("\n\tDeadLetterMessageCount: ").append(serviceBusSubscription.deadLetterMessageCount()) .append("\n\tDefaultMessageTtlDuration: ").append(serviceBusSubscription.defaultMessageTtlDuration()) .append("\n\tIsBatchedOperationsEnabled: ").append(serviceBusSubscription.isBatchedOperationsEnabled()) .append("\n\tDeleteOnIdleDurationInMinutes: ").append(serviceBusSubscription.deleteOnIdleDurationInMinutes()) .append("\n\tScheduledMessageCount: ").append(serviceBusSubscription.scheduledMessageCount()) 
.append("\n\tStatus: ").append(serviceBusSubscription.status()) .append("\n\tTransferMessageCount: ").append(serviceBusSubscription.transferMessageCount()) .append("\n\tIsDeadLetteringEnabledForExpiredMessages: ").append(serviceBusSubscription.isDeadLetteringEnabledForExpiredMessages()) .append("\n\tIsSessionEnabled: ").append(serviceBusSubscription.isSessionEnabled()) .append("\n\tLockDurationInSeconds: ").append(serviceBusSubscription.lockDurationInSeconds()) .append("\n\tMaxDeliveryCountBeforeDeadLetteringMessage: ").append(serviceBusSubscription.maxDeliveryCountBeforeDeadLetteringMessage()) .append("\n\tIsDeadLetteringEnabledForFilterEvaluationFailedMessages: ").append(serviceBusSubscription.isDeadLetteringEnabledForFilterEvaluationFailedMessages()) .append("\n\tTransferMessageCount: ").append(serviceBusSubscription.transferMessageCount()) .append("\n\tTransferDeadLetterMessageCount: ").append(serviceBusSubscription.transferDeadLetterMessageCount()); System.out.println(builder.toString()); } /** * Print topic Authorization Rule info. 
* * @param topicAuthorizationRule a topic Authorization Rule */ public static void print(TopicAuthorizationRule topicAuthorizationRule) { StringBuilder builder = new StringBuilder() .append("Service bus topic authorization rule: ").append(topicAuthorizationRule.id()) .append("\n\tName: ").append(topicAuthorizationRule.name()) .append("\n\tResourceGroupName: ").append(topicAuthorizationRule.resourceGroupName()) .append("\n\tNamespace Name: ").append(topicAuthorizationRule.namespaceName()) .append("\n\tTopic Name: ").append(topicAuthorizationRule.topicName()); List<com.azure.resourcemanager.servicebus.models.AccessRights> rights = topicAuthorizationRule.rights(); builder.append("\n\tNumber of access rights in queue: ").append(rights.size()); for (com.azure.resourcemanager.servicebus.models.AccessRights right : rights) { builder.append("\n\t\tAccessRight: ") .append("\n\t\t\tName :").append(right.name()); } System.out.println(builder.toString()); } /** * Print CosmosDB info. * * @param cosmosDBAccount a CosmosDB */ public static void print(CosmosDBAccount cosmosDBAccount) { StringBuilder builder = new StringBuilder() .append("CosmosDB: ").append(cosmosDBAccount.id()) .append("\n\tName: ").append(cosmosDBAccount.name()) .append("\n\tResourceGroupName: ").append(cosmosDBAccount.resourceGroupName()) .append("\n\tKind: ").append(cosmosDBAccount.kind().toString()) .append("\n\tDefault consistency level: ").append(cosmosDBAccount.consistencyPolicy().defaultConsistencyLevel()) .append("\n\tIP range filter: ").append(cosmosDBAccount.ipRangeFilter()); DatabaseAccountListKeysResult keys = cosmosDBAccount.listKeys(); DatabaseAccountListReadOnlyKeysResult readOnlyKeys = cosmosDBAccount.listReadOnlyKeys(); builder .append("\n\tPrimary Master Key: ").append(keys.primaryMasterKey()) .append("\n\tSecondary Master Key: ").append(keys.secondaryMasterKey()) .append("\n\tPrimary Read-Only Key: ").append(readOnlyKeys.primaryReadonlyMasterKey()) .append("\n\tSecondary Read-Only Key: 
").append(readOnlyKeys.secondaryReadonlyMasterKey()); for (Location writeReplica : cosmosDBAccount.writableReplications()) { builder.append("\n\t\tWrite replication: ") .append("\n\t\t\tName :").append(writeReplica.locationName()); } builder.append("\n\tNumber of read replications: ").append(cosmosDBAccount.readableReplications().size()); for (Location readReplica : cosmosDBAccount.readableReplications()) { builder.append("\n\t\tRead replication: ") .append("\n\t\t\tName :").append(readReplica.locationName()); } } /** * Print Active Directory User info. * * @param user active directory user */ public static void print(ActiveDirectoryUser user) { StringBuilder builder = new StringBuilder() .append("Active Directory User: ").append(user.id()) .append("\n\tName: ").append(user.name()) .append("\n\tMail: ").append(user.mail()) .append("\n\tMail Nickname: ").append(user.mailNickname()) .append("\n\tUser Principal Name: ").append(user.userPrincipalName()); System.out.println(builder.toString()); } /** * Print Active Directory User info. 
* * @param role role definition */ public static void print(RoleDefinition role) { StringBuilder builder = new StringBuilder() .append("Role Definition: ").append(role.id()) .append("\n\tName: ").append(role.name()) .append("\n\tRole Name: ").append(role.roleName()) .append("\n\tType: ").append(role.type()) .append("\n\tDescription: ").append(role.description()) .append("\n\tType: ").append(role.type()); Set<Permission> permissions = role.permissions(); builder.append("\n\tPermissions: ").append(permissions.size()); for (Permission permission : permissions) { builder.append("\n\t\tPermission Actions: " + permission.actions().size()); for (String action : permission.actions()) { builder.append("\n\t\t\tName :").append(action); } builder.append("\n\t\tPermission Not Actions: " + permission.notActions().size()); for (String notAction : permission.notActions()) { builder.append("\n\t\t\tName :").append(notAction); } builder.append("\n\t\tPermission Data Actions: " + permission.dataActions().size()); for (String dataActions : permission.dataActions()) { builder.append("\n\t\t\tName :").append(dataActions); } builder.append("\n\t\tPermission Not Data Actions: " + permission.notDataActions().size()); for (String notDataActions : permission.notDataActions()) { builder.append("\n\t\t\tName :").append(notDataActions); } } Set<String> assignableScopes = role.assignableScopes(); builder.append("\n\tAssignable scopes: ").append(assignableScopes.size()); for (String scope : assignableScopes) { builder.append("\n\t\tAssignable Scope: ") .append("\n\t\t\tName :").append(scope); } System.out.println(builder.toString()); } /** * Print Role Assignment info. 
* * @param roleAssignment role assignment */ public static void print(RoleAssignment roleAssignment) { StringBuilder builder = new StringBuilder() .append("Role Assignment: ") .append("\n\tScope: ").append(roleAssignment.scope()) .append("\n\tPrincipal Id: ").append(roleAssignment.principalId()) .append("\n\tRole Definition Id: ").append(roleAssignment.roleDefinitionId()); System.out.println(builder.toString()); } /** * Print Active Directory Group info. * * @param group active directory group */ public static void print(ActiveDirectoryGroup group) { StringBuilder builder = new StringBuilder() .append("Active Directory Group: ").append(group.id()) .append("\n\tName: ").append(group.name()) .append("\n\tMail: ").append(group.mail()) .append("\n\tSecurity Enabled: ").append(group.securityEnabled()) .append("\n\tGroup members:"); for (ActiveDirectoryObject object : group.listMembers()) { builder.append("\n\t\tType: ").append(object.getClass().getSimpleName()) .append("\tName: ").append(object.name()); } System.out.println(builder.toString()); } /** * Print Active Directory Application info. * * @param application active directory application */ public static void print(ActiveDirectoryApplication application) { StringBuilder builder = new StringBuilder() .append("Active Directory Application: ").append(application.id()) .append("\n\tName: ").append(application.name()) .append("\n\tSign on URL: ").append(application.signOnUrl()) .append("\n\tReply URLs:"); for (String replyUrl : application.replyUrls()) { builder.append("\n\t\t").append(replyUrl); } System.out.println(builder.toString()); } /** * Print Service Principal info. 
* * @param servicePrincipal service principal */ public static void print(ServicePrincipal servicePrincipal) { StringBuilder builder = new StringBuilder() .append("Service Principal: ").append(servicePrincipal.id()) .append("\n\tName: ").append(servicePrincipal.name()) .append("\n\tApplication Id: ").append(servicePrincipal.applicationId()); List<String> names = servicePrincipal.servicePrincipalNames(); builder.append("\n\tNames: ").append(names.size()); for (String name : names) { builder.append("\n\t\tName: ").append(name); } System.out.println(builder.toString()); } /** * Print Network Watcher info. * * @param nw network watcher */ public static void print(NetworkWatcher nw) { StringBuilder builder = new StringBuilder() .append("Network Watcher: ").append(nw.id()) .append("\n\tName: ").append(nw.name()) .append("\n\tResource group name: ").append(nw.resourceGroupName()) .append("\n\tRegion name: ").append(nw.regionName()); System.out.println(builder.toString()); } /** * Print packet capture info. 
* * @param resource packet capture */ public static void print(PacketCapture resource) { StringBuilder sb = new StringBuilder().append("Packet Capture: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tTarget id: ").append(resource.targetId()) .append("\n\tTime limit in seconds: ").append(resource.timeLimitInSeconds()) .append("\n\tBytes to capture per packet: ").append(resource.bytesToCapturePerPacket()) .append("\n\tProvisioning state: ").append(resource.provisioningState()) .append("\n\tStorage location:") .append("\n\tStorage account id: ").append(resource.storageLocation().storageId()) .append("\n\tStorage account path: ").append(resource.storageLocation().storagePath()) .append("\n\tFile path: ").append(resource.storageLocation().filePath()) .append("\n\t Packet capture filters: ").append(resource.filters().size()); for (PacketCaptureFilter filter : resource.filters()) { sb.append("\n\t\tProtocol: ").append(filter.protocol()); sb.append("\n\t\tLocal IP address: ").append(filter.localIpAddress()); sb.append("\n\t\tRemote IP address: ").append(filter.remoteIpAddress()); sb.append("\n\t\tLocal port: ").append(filter.localPort()); sb.append("\n\t\tRemote port: ").append(filter.remotePort()); } System.out.println(sb.toString()); } /** * Print verification IP flow info. * * @param resource IP flow verification info */ public static void print(VerificationIPFlow resource) { System.out.println(new StringBuilder("IP flow verification: ") .append("\n\tAccess: ").append(resource.access()) .append("\n\tRule name: ").append(resource.ruleName()) .toString()); } /** * Print topology info. 
* * @param resource topology */ public static void print(Topology resource) { StringBuilder sb = new StringBuilder().append("Topology: ").append(resource.id()) .append("\n\tTopology parameters: ") .append("\n\t\tResource group: ").append(resource.topologyParameters().targetResourceGroupName()) .append("\n\t\tVirtual network: ").append(resource.topologyParameters().targetVirtualNetwork() == null ? "" : resource.topologyParameters().targetVirtualNetwork().id()) .append("\n\t\tSubnet id: ").append(resource.topologyParameters().targetSubnet() == null ? "" : resource.topologyParameters().targetSubnet().id()) .append("\n\tCreated time: ").append(resource.createdTime()) .append("\n\tLast modified time: ").append(resource.lastModifiedTime()); for (TopologyResource tr : resource.resources().values()) { sb.append("\n\tTopology resource: ").append(tr.id()) .append("\n\t\tName: ").append(tr.name()) .append("\n\t\tLocation: ").append(tr.location()) .append("\n\t\tAssociations:"); for (TopologyAssociation association : tr.associations()) { sb.append("\n\t\t\tName:").append(association.name()) .append("\n\t\t\tResource id:").append(association.resourceId()) .append("\n\t\t\tAssociation type:").append(association.associationType()); } } System.out.println(sb.toString()); } /** * Print flow log settings info. * * @param resource flow log settings */ public static void print(FlowLogSettings resource) { System.out.println(new StringBuilder().append("Flow log settings: ") .append("Target resource id: ").append(resource.targetResourceId()) .append("\n\tFlow log enabled: ").append(resource.enabled()) .append("\n\tStorage account id: ").append(resource.storageId()) .append("\n\tRetention policy enabled: ").append(resource.isRetentionEnabled()) .append("\n\tRetention policy days: ").append(resource.retentionDays()) .toString()); } /** * Print availability set info. 
* * @param resource an availability set */ public static void print(SecurityGroupView resource) { StringBuilder sb = new StringBuilder().append("Security group view: ") .append("\n\tVirtual machine id: ").append(resource.vmId()); for (SecurityGroupNetworkInterface sgni : resource.networkInterfaces().values()) { sb.append("\n\tSecurity group network interface:").append(sgni.id()) .append("\n\t\tSecurity group network interface:") .append("\n\t\tEffective security rules:"); for (EffectiveNetworkSecurityRule rule : sgni.securityRuleAssociations().effectiveSecurityRules()) { sb.append("\n\t\t\tName: ").append(rule.name()) .append("\n\t\t\tDirection: ").append(rule.direction()) .append("\n\t\t\tAccess: ").append(rule.access()) .append("\n\t\t\tPriority: ").append(rule.priority()) .append("\n\t\t\tSource address prefix: ").append(rule.sourceAddressPrefix()) .append("\n\t\t\tSource port range: ").append(rule.sourcePortRange()) .append("\n\t\t\tDestination address prefix: ").append(rule.destinationAddressPrefix()) .append("\n\t\t\tDestination port range: ").append(rule.destinationPortRange()) .append("\n\t\t\tProtocol: ").append(rule.protocol()); } sb.append("\n\t\tSubnet:").append(sgni.securityRuleAssociations().subnetAssociation().id()); printSecurityRule(sb, sgni.securityRuleAssociations().subnetAssociation().securityRules()); if (sgni.securityRuleAssociations().networkInterfaceAssociation() != null) { sb.append("\n\t\tNetwork interface:").append(sgni.securityRuleAssociations().networkInterfaceAssociation().id()); printSecurityRule(sb, sgni.securityRuleAssociations().networkInterfaceAssociation().securityRules()); } sb.append("\n\t\tDefault security rules:"); printSecurityRule(sb, sgni.securityRuleAssociations().defaultSecurityRules()); } System.out.println(sb.toString()); } private static void printSecurityRule(StringBuilder sb, List<SecurityRuleInner> rules) { for (SecurityRuleInner rule : rules) { sb.append("\n\t\t\tName: ").append(rule.name()) 
.append("\n\t\t\tDirection: ").append(rule.direction()) .append("\n\t\t\tAccess: ").append(rule.access()) .append("\n\t\t\tPriority: ").append(rule.priority()) .append("\n\t\t\tSource address prefix: ").append(rule.sourceAddressPrefix()) .append("\n\t\t\tSource port range: ").append(rule.sourcePortRange()) .append("\n\t\t\tDestination address prefix: ").append(rule.destinationAddressPrefix()) .append("\n\t\t\tDestination port range: ").append(rule.destinationPortRange()) .append("\n\t\t\tProtocol: ").append(rule.protocol()) .append("\n\t\t\tDescription: ").append(rule.description()) .append("\n\t\t\tProvisioning state: ").append(rule.provisioningState()); } } /** * Print next hop info. * * @param resource an availability set */ public static void print(NextHop resource) { System.out.println(new StringBuilder("Next hop: ") .append("Next hop type: ").append(resource.nextHopType()) .append("\n\tNext hop ip address: ").append(resource.nextHopIpAddress()) .append("\n\tRoute table id: ").append(resource.routeTableId()) .toString()); } /** * Print container group info. 
* * @param resource a container group */ public static void print(ContainerGroup resource) { StringBuilder info = new StringBuilder().append("Container Group: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tOS type: ").append(resource.osType()); if (resource.ipAddress() != null) { info.append("\n\tPublic IP address: ").append(resource.ipAddress()); } if (resource.externalTcpPorts() != null) { info.append("\n\tExternal TCP ports:"); for (int port : resource.externalTcpPorts()) { info.append(" ").append(port); } } if (resource.externalUdpPorts() != null) { info.append("\n\tExternal UDP ports:"); for (int port : resource.externalUdpPorts()) { info.append(" ").append(port); } } if (resource.imageRegistryServers() != null) { info.append("\n\tPrivate Docker image registries:"); for (String server : resource.imageRegistryServers()) { info.append(" ").append(server); } } if (resource.volumes() != null) { info.append("\n\tVolume mapping: "); for (Map.Entry<String, Volume> entry : resource.volumes().entrySet()) { info.append("\n\t\tName: ").append(entry.getKey()).append(" -> ") .append(entry.getValue().azureFile() != null ? 
entry.getValue().azureFile().shareName() : "empty direcory volume"); } } if (resource.containers() != null) { info.append("\n\tContainer instances: "); for (Map.Entry<String, Container> entry : resource.containers().entrySet()) { Container container = entry.getValue(); info.append("\n\t\tName: ").append(entry.getKey()).append(" -> ").append(container.image()); info.append("\n\t\t\tResources: "); info.append(container.resources().requests().cpu()).append("CPUs "); info.append(container.resources().requests().memoryInGB()).append("GB"); info.append("\n\t\t\tPorts:"); for (ContainerPort port : container.ports()) { info.append(" ").append(port.port()); } if (container.volumeMounts() != null) { info.append("\n\t\t\tVolume mounts:"); for (VolumeMount volumeMount : container.volumeMounts()) { info.append(" ").append(volumeMount.name()).append("->").append(volumeMount.mountPath()); } } if (container.command() != null) { info.append("\n\t\t\tStart commands:"); for (String command : container.command()) { info.append("\n\t\t\t\t").append(command); } } if (container.environmentVariables() != null) { info.append("\n\t\t\tENV vars:"); for (EnvironmentVariable envVar : container.environmentVariables()) { info.append("\n\t\t\t\t").append(envVar.name()).append("=").append(envVar.value()); } } } } System.out.println(info.toString()); } /** * Print event hub namespace. 
* * @param resource a virtual machine */ public static void print(EventHubNamespace resource) { StringBuilder info = new StringBuilder(); info.append("Eventhub Namespace: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tAzureInsightMetricId: ").append(resource.azureInsightMetricId()) .append("\n\tIsAutoScale enabled: ").append(resource.isAutoScaleEnabled()) .append("\n\tServiceBus endpoint: ").append(resource.serviceBusEndpoint()) .append("\n\tThroughPut upper limit: ").append(resource.throughputUnitsUpperLimit()) .append("\n\tCurrent ThroughPut: ").append(resource.currentThroughputUnits()) .append("\n\tCreated time: ").append(resource.createdAt()) .append("\n\tUpdated time: ").append(resource.updatedAt()); System.out.println(info.toString()); } /** * Print event hub. * * @param resource event hub */ public static void print(EventHub resource) { StringBuilder info = new StringBuilder(); info.append("Eventhub: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tNamespace resource group: ").append(resource.namespaceResourceGroupName()) .append("\n\tNamespace: ").append(resource.namespaceName()) .append("\n\tIs data capture enabled: ").append(resource.isDataCaptureEnabled()) .append("\n\tPartition ids: ").append(resource.partitionIds()); if (resource.isDataCaptureEnabled()) { info.append("\n\t\t\tData capture window size in MB: ").append(resource.dataCaptureWindowSizeInMB()); info.append("\n\t\t\tData capture window size in seconds: ").append(resource.dataCaptureWindowSizeInSeconds()); if (resource.captureDestination() != null) { info.append("\n\t\t\tData capture storage account: ").append(resource.captureDestination().storageAccountResourceId()); info.append("\n\t\t\tData capture storage container: ").append(resource.captureDestination().blobContainer()); } } System.out.println(info.toString()); } /** * 
Print event hub namespace recovery pairing. * * @param resource event hub namespace disaster recovery pairing */ public static void print(EventHubDisasterRecoveryPairing resource) { StringBuilder info = new StringBuilder(); info.append("DisasterRecoveryPairing: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tPrimary namespace resource group name: ").append(resource.primaryNamespaceResourceGroupName()) .append("\n\tPrimary namespace name: ").append(resource.primaryNamespaceName()) .append("\n\tSecondary namespace: ").append(resource.secondaryNamespaceId()) .append("\n\tNamespace role: ").append(resource.namespaceRole()); System.out.println(info.toString()); } /** * Print event hub namespace recovery pairing auth rules. * * @param resource event hub namespace disaster recovery pairing auth rule */ public static void print(DisasterRecoveryPairingAuthorizationRule resource) { StringBuilder info = new StringBuilder(); info.append("DisasterRecoveryPairing auth rule: ").append(resource.name()); List<String> rightsStr = new ArrayList<>(); for (AccessRights rights : resource.rights()) { rightsStr.add(rights.toString()); } info.append("\n\tRights: ").append(rightsStr); System.out.println(info.toString()); } /** * Print event hub namespace recovery pairing auth rule key. 
* * @param resource event hub namespace disaster recovery pairing auth rule key */ public static void print(DisasterRecoveryPairingAuthorizationKey resource) { StringBuilder info = new StringBuilder(); info.append("DisasterRecoveryPairing auth key: ") .append("\n\t Alias primary connection string: ").append(resource.aliasPrimaryConnectionString()) .append("\n\t Alias secondary connection string: ").append(resource.aliasSecondaryConnectionString()) .append("\n\t Primary key: ").append(resource.primaryKey()) .append("\n\t Secondary key: ").append(resource.secondaryKey()) .append("\n\t Primary connection string: ").append(resource.primaryConnectionString()) .append("\n\t Secondary connection string: ").append(resource.secondaryConnectionString()); System.out.println(info.toString()); } /** * Print event hub consumer group. * * @param resource event hub consumer group */ public static void print(EventHubConsumerGroup resource) { StringBuilder info = new StringBuilder(); info.append("Event hub consumer group: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tNamespace resource group: ").append(resource.namespaceResourceGroupName()) .append("\n\tNamespace: ").append(resource.namespaceName()) .append("\n\tEvent hub name: ").append(resource.eventHubName()) .append("\n\tUser metadata: ").append(resource.userMetadata()); System.out.println(info.toString()); } /** * Print Diagnostic Setting. 
* * @param resource Diagnostic Setting instance */ public static void print(DiagnosticSetting resource) { StringBuilder info = new StringBuilder("Diagnostic Setting: ") .append("\n\tId: ").append(resource.id()) .append("\n\tAssociated resource Id: ").append(resource.resourceId()) .append("\n\tName: ").append(resource.name()) .append("\n\tStorage Account Id: ").append(resource.storageAccountId()) .append("\n\tEventHub Namespace Autorization Rule Id: ").append(resource.eventHubAuthorizationRuleId()) .append("\n\tEventHub name: ").append(resource.eventHubName()) .append("\n\tLog Analytics workspace Id: ").append(resource.workspaceId()); if (resource.logs() != null && !resource.logs().isEmpty()) { info.append("\n\tLog Settings: "); for (LogSettings ls : resource.logs()) { info.append("\n\t\tCategory: ").append(ls.category()); info.append("\n\t\tRetention policy: "); if (ls.retentionPolicy() != null) { info.append(ls.retentionPolicy().days() + " days"); } else { info.append("NONE"); } } } if (resource.metrics() != null && !resource.metrics().isEmpty()) { info.append("\n\tMetric Settings: "); for (MetricSettings ls : resource.metrics()) { info.append("\n\t\tCategory: ").append(ls.category()); info.append("\n\t\tTimegrain: ").append(ls.timeGrain()); info.append("\n\t\tRetention policy: "); if (ls.retentionPolicy() != null) { info.append(ls.retentionPolicy().days() + " days"); } else { info.append("NONE"); } } } System.out.println(info.toString()); } /** * Print Action group settings. 
* * @param actionGroup action group instance */ public static void print(ActionGroup actionGroup) { StringBuilder info = new StringBuilder("Action Group: ") .append("\n\tId: ").append(actionGroup.id()) .append("\n\tName: ").append(actionGroup.name()) .append("\n\tShort Name: ").append(actionGroup.shortName()); if (actionGroup.emailReceivers() != null && !actionGroup.emailReceivers().isEmpty()) { info.append("\n\tEmail receivers: "); for (EmailReceiver er : actionGroup.emailReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tEMail: ").append(er.emailAddress()); info.append("\n\t\tStatus: ").append(er.status()); info.append("\n\t\t==="); } } if (actionGroup.smsReceivers() != null && !actionGroup.smsReceivers().isEmpty()) { info.append("\n\tSMS text message receivers: "); for (SmsReceiver er : actionGroup.smsReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tPhone: ").append(er.countryCode() + er.phoneNumber()); info.append("\n\t\tStatus: ").append(er.status()); info.append("\n\t\t==="); } } if (actionGroup.webhookReceivers() != null && !actionGroup.webhookReceivers().isEmpty()) { info.append("\n\tWebhook receivers: "); for (WebhookReceiver er : actionGroup.webhookReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tURI: ").append(er.serviceUri()); info.append("\n\t\t==="); } } if (actionGroup.pushNotificationReceivers() != null && !actionGroup.pushNotificationReceivers().isEmpty()) { info.append("\n\tApp Push Notification receivers: "); for (AzureAppPushReceiver er : actionGroup.pushNotificationReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tEmail: ").append(er.emailAddress()); info.append("\n\t\t==="); } } if (actionGroup.voiceReceivers() != null && !actionGroup.voiceReceivers().isEmpty()) { info.append("\n\tVoice Message receivers: "); for (VoiceReceiver er : actionGroup.voiceReceivers()) { info.append("\n\t\tName: ").append(er.name()); 
info.append("\n\t\tPhone: ").append(er.countryCode() + er.phoneNumber()); info.append("\n\t\t==="); } } if (actionGroup.automationRunbookReceivers() != null && !actionGroup.automationRunbookReceivers().isEmpty()) { info.append("\n\tAutomation Runbook receivers: "); for (AutomationRunbookReceiver er : actionGroup.automationRunbookReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tRunbook Name: ").append(er.runbookName()); info.append("\n\t\tAccount Id: ").append(er.automationAccountId()); info.append("\n\t\tIs Global: ").append(er.isGlobalRunbook()); info.append("\n\t\tService URI: ").append(er.serviceUri()); info.append("\n\t\tWebhook resource Id: ").append(er.webhookResourceId()); info.append("\n\t\t==="); } } if (actionGroup.azureFunctionReceivers() != null && !actionGroup.azureFunctionReceivers().isEmpty()) { info.append("\n\tAzure Functions receivers: "); for (AzureFunctionReceiver er : actionGroup.azureFunctionReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tFunction Name: ").append(er.functionName()); info.append("\n\t\tFunction App Resource Id: ").append(er.functionAppResourceId()); info.append("\n\t\tFunction Trigger URI: ").append(er.httpTriggerUrl()); info.append("\n\t\t==="); } } if (actionGroup.logicAppReceivers() != null && !actionGroup.logicAppReceivers().isEmpty()) { info.append("\n\tLogic App receivers: "); for (LogicAppReceiver er : actionGroup.logicAppReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tResource Id: ").append(er.resourceId()); info.append("\n\t\tCallback URL: ").append(er.callbackUrl()); info.append("\n\t\t==="); } } if (actionGroup.itsmReceivers() != null && !actionGroup.itsmReceivers().isEmpty()) { info.append("\n\tITSM receivers: "); for (ItsmReceiver er : actionGroup.itsmReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tWorkspace Id: ").append(er.workspaceId()); info.append("\n\t\tConnection Id: 
").append(er.connectionId()); info.append("\n\t\tRegion: ").append(er.region()); info.append("\n\t\tTicket Configuration: ").append(er.ticketConfiguration()); info.append("\n\t\t==="); } } System.out.println(info.toString()); } /** * Print activity log alert settings. * * @param activityLogAlert activity log instance */ public static void print(ActivityLogAlert activityLogAlert) { StringBuilder info = new StringBuilder("Activity Log Alert: ") .append("\n\tId: ").append(activityLogAlert.id()) .append("\n\tName: ").append(activityLogAlert.name()) .append("\n\tDescription: ").append(activityLogAlert.description()) .append("\n\tIs Enabled: ").append(activityLogAlert.enabled()); if (activityLogAlert.scopes() != null && !activityLogAlert.scopes().isEmpty()) { info.append("\n\tScopes: "); for (String er : activityLogAlert.scopes()) { info.append("\n\t\tId: ").append(er); } } if (activityLogAlert.actionGroupIds() != null && !activityLogAlert.actionGroupIds().isEmpty()) { info.append("\n\tAction Groups: "); for (String er : activityLogAlert.actionGroupIds()) { info.append("\n\t\tAction Group Id: ").append(er); } } if (activityLogAlert.equalsConditions() != null && !activityLogAlert.equalsConditions().isEmpty()) { info.append("\n\tAlert conditions (when all of is true): "); for (Map.Entry<String, String> er : activityLogAlert.equalsConditions().entrySet()) { info.append("\n\t\t'").append(er.getKey()).append("' equals '").append(er.getValue()).append("'"); } } System.out.println(info.toString()); } /** * Print metric alert settings. 
* * @param metricAlert metric alert instance */ public static void print(MetricAlert metricAlert) { StringBuilder info = new StringBuilder("Metric Alert: ") .append("\n\tId: ").append(metricAlert.id()) .append("\n\tName: ").append(metricAlert.name()) .append("\n\tDescription: ").append(metricAlert.description()) .append("\n\tIs Enabled: ").append(metricAlert.enabled()) .append("\n\tIs Auto Mitigated: ").append(metricAlert.autoMitigate()) .append("\n\tSeverity: ").append(metricAlert.severity()) .append("\n\tWindow Size: ").append(metricAlert.windowSize()) .append("\n\tEvaluation Frequency: ").append(metricAlert.evaluationFrequency()); if (metricAlert.scopes() != null && !metricAlert.scopes().isEmpty()) { info.append("\n\tScopes: "); for (String er : metricAlert.scopes()) { info.append("\n\t\tId: ").append(er); } } if (metricAlert.actionGroupIds() != null && !metricAlert.actionGroupIds().isEmpty()) { info.append("\n\tAction Groups: "); for (String er : metricAlert.actionGroupIds()) { info.append("\n\t\tAction Group Id: ").append(er); } } if (metricAlert.alertCriterias() != null && !metricAlert.alertCriterias().isEmpty()) { info.append("\n\tAlert conditions (when all of is true): "); for (Map.Entry<String, MetricAlertCondition> er : metricAlert.alertCriterias().entrySet()) { MetricAlertCondition alertCondition = er.getValue(); info.append("\n\t\tCondition name: ").append(er.getKey()) .append("\n\t\tSignal name: ").append(alertCondition.metricName()) .append("\n\t\tMetric Namespace: ").append(alertCondition.metricNamespace()) .append("\n\t\tOperator: ").append(alertCondition.condition()) .append("\n\t\tThreshold: ").append(alertCondition.threshold()) .append("\n\t\tTime Aggregation: ").append(alertCondition.timeAggregation()); if (alertCondition.dimensions() != null && !alertCondition.dimensions().isEmpty()) { for (MetricDimension dimon : alertCondition.dimensions()) { info.append("\n\t\tDimension Filter: ").append("Name [").append(dimon.name()).append("] operator 
[Include] values["); for (String vals : dimon.values()) { info.append(vals).append(", "); } info.append("]"); } } } } System.out.println(info.toString()); } /** * Print spring service settings. * * @param springService spring service instance */ public static void print(SpringService springService) { StringBuilder info = new StringBuilder("Spring Service: ") .append("\n\tId: ").append(springService.id()) .append("\n\tName: ").append(springService.name()) .append("\n\tResource Group: ").append(springService.resourceGroupName()) .append("\n\tRegion: ").append(springService.region()) .append("\n\tTags: ").append(springService.tags()); ConfigServerProperties serverProperties = springService.getServerProperties(); if (serverProperties != null && serverProperties.provisioningState() != null && serverProperties.provisioningState().equals(ConfigServerState.SUCCEEDED) && serverProperties.configServer() != null) { info.append("\n\tProperties: "); if (serverProperties.configServer().gitProperty() != null) { info.append("\n\t\tGit: ").append(serverProperties.configServer().gitProperty().uri()); } } if (springService.sku() != null) { info.append("\n\tSku: ") .append("\n\t\tName: ").append(springService.sku().name()) .append("\n\t\tTier: ").append(springService.sku().tier()) .append("\n\t\tCapacity: ").append(springService.sku().capacity()); } MonitoringSettingProperties monitoringSettingProperties = springService.getMonitoringSetting(); if (monitoringSettingProperties != null && monitoringSettingProperties.provisioningState() != null && monitoringSettingProperties.provisioningState().equals(MonitoringSettingState.SUCCEEDED)) { info.append("\n\tTrace: ") .append("\n\t\tEnabled: ").append(monitoringSettingProperties.traceEnabled()) .append("\n\t\tApp Insight Instrumentation Key: ").append(monitoringSettingProperties.appInsightsInstrumentationKey()); } System.out.println(info.toString()); } /** * Print spring app settings. 
* * @param springApp spring app instance */ public static void print(SpringApp springApp) { StringBuilder info = new StringBuilder("Spring Service: ") .append("\n\tId: ").append(springApp.id()) .append("\n\tName: ").append(springApp.name()) .append("\n\tPublic Endpoint: ").append(springApp.isPublic()) .append("\n\tUrl: ").append(springApp.url()) .append("\n\tHttps Only: ").append(springApp.isHttpsOnly()) .append("\n\tFully Qualified Domain Name: ").append(springApp.fqdn()) .append("\n\tActive Deployment Name: ").append(springApp.activeDeploymentName()); if (springApp.temporaryDisk() != null) { info.append("\n\tTemporary Disk:") .append("\n\t\tSize In GB: ").append(springApp.temporaryDisk().sizeInGB()) .append("\n\t\tMount Path: ").append(springApp.temporaryDisk().mountPath()); } if (springApp.persistentDisk() != null) { info.append("\n\tPersistent Disk:") .append("\n\t\tSize In GB: ").append(springApp.persistentDisk().sizeInGB()) .append("\n\t\tMount Path: ").append(springApp.persistentDisk().mountPath()); } if (springApp.identity() != null) { info.append("\n\tIdentity:") .append("\n\t\tType: ").append(springApp.identity().type()) .append("\n\t\tPrincipal Id: ").append(springApp.identity().principalId()) .append("\n\t\tTenant Id: ").append(springApp.identity().tenantId()); } System.out.println(info.toString()); } /** * Print private link resource. * * @param privateLinkResource the private link resource */ public static void print(PrivateLinkResource privateLinkResource) { StringBuilder info = new StringBuilder("Private Link Resource: ") .append("\n\tGroup ID: ").append(privateLinkResource.groupId()) .append("\n\tRequired Member Names: ").append(privateLinkResource.requiredMemberNames()) .append("\n\tRequired DNS Zone Names: ").append(privateLinkResource.requiredDnsZoneNames()); System.out.println(info); } /** * Print private endpoint. 
* * @param privateEndpoint the private endpoint */ public static void print(PrivateEndpoint privateEndpoint) { StringBuilder info = new StringBuilder("Private Endpoint: ") .append("\n\tId: ").append(privateEndpoint.id()) .append("\n\tName: ").append(privateEndpoint.name()); if (privateEndpoint.privateLinkServiceConnections() != null && !privateEndpoint.privateLinkServiceConnections().isEmpty()) { for (PrivateEndpoint.PrivateLinkServiceConnection connection : privateEndpoint.privateLinkServiceConnections().values()) { info .append("\n\t\tPrivate Link Service Connection Name: ").append(connection.name()) .append("\n\t\tPrivate Link Resource ID: ").append(connection.privateLinkResourceId()) .append("\n\t\tSub Resource Names: ").append(connection.subResourceNames()) .append("\n\t\tProvision Status: ").append(connection.state().status()); } } if (privateEndpoint.privateLinkServiceConnections() != null && !privateEndpoint.privateLinkServiceConnections().isEmpty()) { info.append("\n\tPrivate Link Service Connections:"); for (PrivateEndpoint.PrivateLinkServiceConnection connection : privateEndpoint.privateLinkServiceConnections().values()) { info .append("\n\t\tName: ").append(connection.name()) .append("\n\t\tPrivate Link Resource ID: ").append(connection.privateLinkResourceId()) .append("\n\t\tSub Resource Names: ").append(connection.subResourceNames()) .append("\n\t\tStatus: ").append(connection.state().status()); } } if (privateEndpoint.customDnsConfigurations() != null && !privateEndpoint.customDnsConfigurations().isEmpty()) { info.append("\n\tCustom DNS Configure:"); for (CustomDnsConfigPropertiesFormat customDns : privateEndpoint.customDnsConfigurations()) { info .append("\n\t\tFQDN: ").append(customDns.fqdn()) .append("\n\t\tIP Address: ").append(customDns.ipAddresses()); } } System.out.println(info); } /** * Sends a GET request to target URL. * <p> * Retry logic tuned for AppService. * The method does not handle 301 redirect. * * @param urlString the target URL. 
* @return Content of the HTTP response. */ public static String sendGetRequest(String urlString) { HttpRequest request = new HttpRequest(HttpMethod.GET, urlString); Mono<Response<String>> response = stringResponse(HTTP_PIPELINE.send(request) .flatMap(response1 -> { int code = response1.getStatusCode(); if (code == 200 || code == 400 || code == 404) { return Mono.just(response1); } else { return Mono.error(new HttpResponseException(response1)); } }) .retryWhen(Retry .fixedDelay(5, Duration.ofSeconds(30)) .filter(t -> { boolean retry = false; if (t instanceof TimeoutException) { retry = true; } else if (t instanceof HttpResponseException && ((HttpResponseException) t).getResponse().getStatusCode() == 503) { retry = true; } if (retry) { LOGGER.info("retry GET request to {}", urlString); } return retry; }))); Response<String> ret = response.block(); return ret == null ? null : ret.getValue(); } /** * Sends a POST request to target URL. * <p> * Retry logic tuned for AppService. * * @param urlString the target URL. * @param body the request body. * @return Content of the HTTP response. * */ public static String sendPostRequest(String urlString, String body) { try { HttpRequest request = new HttpRequest(HttpMethod.POST, urlString).setBody(body); Mono<Response<String>> response = stringResponse(HTTP_PIPELINE.send(request) .flatMap(response1 -> { int code = response1.getStatusCode(); if (code == 200 || code == 400 || code == 404) { return Mono.just(response1); } else { return Mono.error(new HttpResponseException(response1)); } }) .retryWhen(Retry .fixedDelay(5, Duration.ofSeconds(30)) .filter(t -> { boolean retry = false; if (t instanceof TimeoutException) { retry = true; } if (retry) { LOGGER.info("retry POST request to {}", urlString); } return retry; }))); Response<String> ret = response.block(); return ret == null ? 
null : ret.getValue(); } catch (Exception e) { LOGGER.logThrowableAsError(e); return null; } } private static Mono<Response<String>> stringResponse(Mono<HttpResponse> responseMono) { return responseMono.flatMap(response -> response.getBodyAsString() .map(str -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), str))); } private static final HttpPipeline HTTP_PIPELINE = new HttpPipelineBuilder() .policies( new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC)), new RetryPolicy("Retry-After", ChronoUnit.SECONDS)) .build(); /** * Get the size of the iterable. * * @param iterable iterable to count size * @param <T> generic type parameter of the iterable * @return size of the iterable */ public static <T> int getSize(Iterable<T> iterable) { int res = 0; Iterator<T> iterator = iterable.iterator(); while (iterator.hasNext()) { iterator.next(); res++; } return res; } }
class Utils { private static final ClientLogger LOGGER = new ClientLogger(Utils.class); private static String sshPublicKey; private Utils() { } /** @return a generated password */ public static String password() { String password = new ResourceManagerUtils.InternalRuntimeContext().randomResourceName("Pa5$", 12); System.out.printf("Password: %s%n", password); return password; } /** * @return an SSH public key */ public static String sshPublicKey() { if (sshPublicKey == null) { try { KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA"); keyGen.initialize(1024); KeyPair pair = keyGen.generateKeyPair(); PublicKey publicKey = pair.getPublic(); RSAPublicKey rsaPublicKey = (RSAPublicKey) publicKey; ByteArrayOutputStream byteOs = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(byteOs); dos.writeInt("ssh-rsa".getBytes(StandardCharsets.US_ASCII).length); dos.write("ssh-rsa".getBytes(StandardCharsets.US_ASCII)); dos.writeInt(rsaPublicKey.getPublicExponent().toByteArray().length); dos.write(rsaPublicKey.getPublicExponent().toByteArray()); dos.writeInt(rsaPublicKey.getModulus().toByteArray().length); dos.write(rsaPublicKey.getModulus().toByteArray()); String publicKeyEncoded = new String(Base64.getEncoder().encode(byteOs.toByteArray()), StandardCharsets.US_ASCII); sshPublicKey = "ssh-rsa " + publicKeyEncoded; } catch (NoSuchAlgorithmException | IOException e) { throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e)); } } return sshPublicKey; } /** * Creates a randomized resource name. * Please provider your own implementation, or avoid using the method, if code is to be used in production. * * @param azure the AzureResourceManager instance. * @param prefix the prefix to the name. * @param maxLen the max length of the name. * @return the randomized resource name. 
*/ public static String randomResourceName(AzureResourceManager azure, String prefix, int maxLen) { return azure.resourceGroups().manager().internalContext().randomResourceName(prefix, maxLen); } /** * Generates the specified number of random resource names with the same prefix. * Please provider your own implementation, or avoid using the method, if code is to be used in production. * * @param azure the AzureResourceManager instance. * @param prefix the prefix to be used if possible * @param maxLen the maximum length for the random generated name * @param count the number of names to generate * @return the randomized resource names. */ public static String[] randomResourceNames(AzureResourceManager azure, String prefix, int maxLen, int count) { String[] names = new String[count]; for (int i = 0; i < count; i++) { names[i] = randomResourceName(azure, prefix, maxLen); } return names; } /** * Creates a random UUID. * Please provider your own implementation, or avoid using the method, if code is to be used in production. * * @param azure the AzureResourceManager instance. * @return the random UUID. */ public static String randomUuid(AzureResourceManager azure) { return azure.resourceGroups().manager().internalContext().randomUuid(); } /** * Creates a randomized resource name. * Please provider your own implementation, or avoid using the method, if code is to be used in production. * * @param authenticated the AzureResourceManager.Authenticated instance. * @param prefix the prefix to the name. * @param maxLen the max length of the name. * @return the randomized resource name. */ public static String randomResourceName(AzureResourceManager.Authenticated authenticated, String prefix, int maxLen) { return authenticated.roleAssignments().manager().internalContext().randomResourceName(prefix, maxLen); } /** * Print resource group info. 
* * @param resource a resource group */ public static void print(ResourceGroup resource) { StringBuilder info = new StringBuilder(); info.append("Resource Group: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()); System.out.println(info.toString()); } /** * Print User Assigned MSI info. * * @param resource a User Assigned MSI */ public static void print(Identity resource) { StringBuilder info = new StringBuilder(); info.append("Resource Group: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tService Principal Id: ").append(resource.principalId()) .append("\n\tClient Id: ").append(resource.clientId()) .append("\n\tTenant Id: ").append(resource.tenantId()); System.out.println(info.toString()); } /** * Print virtual machine info. * * @param resource a virtual machine */ public static void print(VirtualMachine resource) { StringBuilder storageProfile = new StringBuilder().append("\n\tStorageProfile: "); if (resource.storageProfile().imageReference() != null) { storageProfile.append("\n\t\tImageReference:"); storageProfile.append("\n\t\t\tPublisher: ").append(resource.storageProfile().imageReference().publisher()); storageProfile.append("\n\t\t\tOffer: ").append(resource.storageProfile().imageReference().offer()); storageProfile.append("\n\t\t\tSKU: ").append(resource.storageProfile().imageReference().sku()); storageProfile.append("\n\t\t\tVersion: ").append(resource.storageProfile().imageReference().version()); } if (resource.storageProfile().osDisk() != null) { storageProfile.append("\n\t\tOSDisk:"); storageProfile.append("\n\t\t\tOSType: ").append(resource.storageProfile().osDisk().osType()); storageProfile.append("\n\t\t\tName: ").append(resource.storageProfile().osDisk().name()); storageProfile.append("\n\t\t\tCaching: 
").append(resource.storageProfile().osDisk().caching()); storageProfile.append("\n\t\t\tCreateOption: ").append(resource.storageProfile().osDisk().createOption()); storageProfile.append("\n\t\t\tDiskSizeGB: ").append(resource.storageProfile().osDisk().diskSizeGB()); if (resource.storageProfile().osDisk().managedDisk() != null) { if (resource.storageProfile().osDisk().managedDisk().diskEncryptionSet() != null) { storageProfile.append("\n\t\t\tDiskEncryptionSet Id: ") .append(resource.storageProfile().osDisk().managedDisk().diskEncryptionSet().id()); } } if (resource.storageProfile().osDisk().image() != null) { storageProfile.append("\n\t\t\tImage Uri: ").append(resource.storageProfile().osDisk().image().uri()); } if (resource.storageProfile().osDisk().vhd() != null) { storageProfile.append("\n\t\t\tVhd Uri: ").append(resource.storageProfile().osDisk().vhd().uri()); } if (resource.storageProfile().osDisk().encryptionSettings() != null) { storageProfile.append("\n\t\t\tEncryptionSettings: "); storageProfile.append("\n\t\t\t\tEnabled: ").append(resource.storageProfile().osDisk().encryptionSettings().enabled()); storageProfile.append("\n\t\t\t\tDiskEncryptionKey Uri: ").append(resource .storageProfile() .osDisk() .encryptionSettings() .diskEncryptionKey().secretUrl()); storageProfile.append("\n\t\t\t\tKeyEncryptionKey Uri: ").append(resource .storageProfile() .osDisk() .encryptionSettings() .keyEncryptionKey().keyUrl()); } } if (resource.storageProfile().dataDisks() != null) { int i = 0; for (DataDisk disk : resource.storageProfile().dataDisks()) { storageProfile.append("\n\t\tDataDisk: storageProfile.append("\n\t\t\tName: ").append(disk.name()); storageProfile.append("\n\t\t\tCaching: ").append(disk.caching()); storageProfile.append("\n\t\t\tCreateOption: ").append(disk.createOption()); storageProfile.append("\n\t\t\tDiskSizeGB: ").append(disk.diskSizeGB()); storageProfile.append("\n\t\t\tLun: ").append(disk.lun()); if (resource.isManagedDiskEnabled()) { if 
(disk.managedDisk() != null) { storageProfile.append("\n\t\t\tManaged Disk Id: ").append(disk.managedDisk().id()); if (disk.managedDisk().diskEncryptionSet() != null) { storageProfile.append("\n\t\t\tDiskEncryptionSet Id: ").append(disk.managedDisk().diskEncryptionSet().id()); } } } else { if (disk.vhd().uri() != null) { storageProfile.append("\n\t\t\tVhd Uri: ").append(disk.vhd().uri()); } } if (disk.image() != null) { storageProfile.append("\n\t\t\tImage Uri: ").append(disk.image().uri()); } } } StringBuilder osProfile = new StringBuilder().append("\n\tOSProfile: "); if (resource.osProfile() != null) { osProfile.append("\n\t\tComputerName:").append(resource.osProfile().computerName()); if (resource.osProfile().windowsConfiguration() != null) { osProfile.append("\n\t\t\tWindowsConfiguration: "); osProfile.append("\n\t\t\t\tProvisionVMAgent: ") .append(resource.osProfile().windowsConfiguration().provisionVMAgent()); osProfile.append("\n\t\t\t\tEnableAutomaticUpdates: ") .append(resource.osProfile().windowsConfiguration().enableAutomaticUpdates()); osProfile.append("\n\t\t\t\tTimeZone: ") .append(resource.osProfile().windowsConfiguration().timeZone()); } if (resource.osProfile().linuxConfiguration() != null) { osProfile.append("\n\t\t\tLinuxConfiguration: "); osProfile.append("\n\t\t\t\tDisablePasswordAuthentication: ") .append(resource.osProfile().linuxConfiguration().disablePasswordAuthentication()); } } else { osProfile.append("null"); } StringBuilder networkProfile = new StringBuilder().append("\n\tNetworkProfile: "); for (String networkInterfaceId : resource.networkInterfaceIds()) { networkProfile.append("\n\t\tId:").append(networkInterfaceId); } StringBuilder extensions = new StringBuilder().append("\n\tExtensions: "); for (Map.Entry<String, VirtualMachineExtension> extensionEntry : resource.listExtensions().entrySet()) { VirtualMachineExtension extension = extensionEntry.getValue(); extensions.append("\n\t\tExtension: ").append(extension.id()) 
.append("\n\t\t\tName: ").append(extension.name()) .append("\n\t\t\tTags: ").append(extension.tags()) .append("\n\t\t\tProvisioningState: ").append(extension.provisioningState()) .append("\n\t\t\tAuto upgrade minor version enabled: ").append(extension.autoUpgradeMinorVersionEnabled()) .append("\n\t\t\tPublisher: ").append(extension.publisherName()) .append("\n\t\t\tType: ").append(extension.typeName()) .append("\n\t\t\tVersion: ").append(extension.versionName()) .append("\n\t\t\tPublic Settings: ").append(extension.publicSettingsAsJsonString()); } StringBuilder msi = new StringBuilder().append("\n\tMSI: "); msi.append("\n\t\t\tMSI enabled:").append(resource.isManagedServiceIdentityEnabled()); msi.append("\n\t\t\tSystem Assigned MSI Active Directory Service Principal Id:").append(resource.systemAssignedManagedServiceIdentityPrincipalId()); msi.append("\n\t\t\tSystem Assigned MSI Active Directory Tenant Id:").append(resource.systemAssignedManagedServiceIdentityTenantId()); StringBuilder zones = new StringBuilder().append("\n\tZones: "); zones.append(resource.availabilityZones()); StringBuilder securityProfile = new StringBuilder().append("\n\tSecurityProfile: "); securityProfile.append("\n\t\t\tSecurity type: ").append(resource.securityType()); securityProfile.append("\n\t\t\tSecure Boot enabled: ").append(resource.isSecureBootEnabled()); securityProfile.append("\n\t\t\tvTPM enabled: ").append(resource.isVTpmEnabled()); System.out.println(new StringBuilder().append("Virtual Machine: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tHardwareProfile: ") .append("\n\t\tSize: ").append(resource.size()) .append(storageProfile) .append(osProfile) .append(networkProfile) .append(extensions) .append(msi) .append(zones) .append(securityProfile) .toString()); } /** * Print availability 
set info. * * @param resource an availability set */ public static void print(AvailabilitySet resource) { System.out.println(new StringBuilder().append("Availability Set: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tFault domain count: ").append(resource.faultDomainCount()) .append("\n\tUpdate domain count: ").append(resource.updateDomainCount()) .toString()); } /** * Print network info. * * @param resource a network * @throws ManagementException Cloud errors */ public static void print(Network resource) { StringBuilder info = new StringBuilder(); info.append("Network: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tAddress spaces: ").append(resource.addressSpaces()) .append("\n\tDNS server IPs: ").append(resource.dnsServerIPs()); for (Subnet subnet : resource.subnets().values()) { info.append("\n\tSubnet: ").append(subnet.name()) .append("\n\t\tAddress prefix: ").append(subnet.addressPrefix()); NetworkSecurityGroup subnetNsg = subnet.getNetworkSecurityGroup(); if (subnetNsg != null) { info.append("\n\t\tNetwork security group ID: ").append(subnetNsg.id()); } RouteTable routeTable = subnet.getRouteTable(); if (routeTable != null) { info.append("\n\tRoute table ID: ").append(routeTable.id()); } Map<ServiceEndpointType, List<Region>> services = subnet.servicesWithAccess(); if (services.size() > 0) { info.append("\n\tServices with access"); for (Map.Entry<ServiceEndpointType, List<Region>> service : services.entrySet()) { info.append("\n\t\tService: ") .append(service.getKey()) .append(" Regions: " + service.getValue() + ""); } } } for (NetworkPeering peering : 
resource.peerings().list()) { info.append("\n\tPeering: ").append(peering.name()) .append("\n\t\tRemote network ID: ").append(peering.remoteNetworkId()) .append("\n\t\tPeering state: ").append(peering.state()) .append("\n\t\tIs traffic forwarded from remote network allowed? ").append(peering.isTrafficForwardingFromRemoteNetworkAllowed()) .append("\n\t\tGateway use: ").append(peering.gatewayUse()); } System.out.println(info.toString()); } /** * Print network interface. * * @param resource a network interface */ public static void print(NetworkInterface resource) { StringBuilder info = new StringBuilder(); info.append("NetworkInterface: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tInternal DNS name label: ").append(resource.internalDnsNameLabel()) .append("\n\tInternal FQDN: ").append(resource.internalFqdn()) .append("\n\tInternal domain name suffix: ").append(resource.internalDomainNameSuffix()) .append("\n\tNetwork security group: ").append(resource.networkSecurityGroupId()) .append("\n\tApplied DNS servers: ").append(resource.appliedDnsServers().toString()) .append("\n\tDNS server IPs: "); for (String dnsServerIp : resource.dnsServers()) { info.append("\n\t\t").append(dnsServerIp); } info.append("\n\tIP forwarding enabled? ").append(resource.isIPForwardingEnabled()) .append("\n\tAccelerated networking enabled? 
").append(resource.isAcceleratedNetworkingEnabled()) .append("\n\tMAC Address:").append(resource.macAddress()) .append("\n\tPrivate IP:").append(resource.primaryPrivateIP()) .append("\n\tPrivate allocation method:").append(resource.primaryPrivateIpAllocationMethod()) .append("\n\tPrimary virtual network ID: ").append(resource.primaryIPConfiguration().networkId()) .append("\n\tPrimary subnet name:").append(resource.primaryIPConfiguration().subnetName()); System.out.println(info.toString()); } /** * Print network security group. * * @param resource a network security group */ public static void print(NetworkSecurityGroup resource) { StringBuilder info = new StringBuilder(); info.append("NSG: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()); for (NetworkSecurityRule rule : resource.securityRules().values()) { info.append("\n\tRule: ").append(rule.name()) .append("\n\t\tAccess: ").append(rule.access()) .append("\n\t\tDirection: ").append(rule.direction()) .append("\n\t\tFrom address: ").append(rule.sourceAddressPrefix()) .append("\n\t\tFrom port range: ").append(rule.sourcePortRange()) .append("\n\t\tTo address: ").append(rule.destinationAddressPrefix()) .append("\n\t\tTo port: ").append(rule.destinationPortRange()) .append("\n\t\tProtocol: ").append(rule.protocol()) .append("\n\t\tPriority: ").append(rule.priority()); } System.out.println(info.toString()); } /** * Print public IP address. 
* * @param resource a public IP address */ public static void print(PublicIpAddress resource) { System.out.println(new StringBuilder().append("Public IP Address: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tIP Address: ").append(resource.ipAddress()) .append("\n\tLeaf domain label: ").append(resource.leafDomainLabel()) .append("\n\tFQDN: ").append(resource.fqdn()) .append("\n\tReverse FQDN: ").append(resource.reverseFqdn()) .append("\n\tIdle timeout (minutes): ").append(resource.idleTimeoutInMinutes()) .append("\n\tIP allocation method: ").append(resource.ipAllocationMethod()) .append("\n\tZones: ").append(resource.availabilityZones()) .toString()); } /** * Print a key vault. * * @param vault the key vault resource */ public static void print(Vault vault) { StringBuilder info = new StringBuilder().append("Key Vault: ").append(vault.id()) .append("Name: ").append(vault.name()) .append("\n\tResource group: ").append(vault.resourceGroupName()) .append("\n\tRegion: ").append(vault.region()) .append("\n\tSku: ").append(vault.sku().name()).append(" - ").append(vault.sku().family()) .append("\n\tVault URI: ").append(vault.vaultUri()) .append("\n\tAccess policies: "); for (AccessPolicy accessPolicy : vault.accessPolicies()) { info.append("\n\t\tIdentity:").append(accessPolicy.objectId()); if (accessPolicy.permissions() != null) { if (accessPolicy.permissions().keys() != null) { info.append("\n\t\tKey permissions: ").append(accessPolicy.permissions().keys().stream().map(KeyPermissions::toString).collect(Collectors.joining(", "))); } if (accessPolicy.permissions().secrets() != null) { info.append("\n\t\tSecret permissions: ").append(accessPolicy.permissions().secrets().stream().map(SecretPermissions::toString).collect(Collectors.joining(", "))); } if 
(accessPolicy.permissions().certificates() != null) { info.append("\n\t\tCertificate permissions: ").append(accessPolicy.permissions().certificates().stream().map(CertificatePermissions::toString).collect(Collectors.joining(", "))); } } } System.out.println(info.toString()); } /** * Print storage account. * * @param storageAccount a storage account */ public static void print(StorageAccount storageAccount) { System.out.println(storageAccount.name() + " created @ " + storageAccount.creationTime()); StringBuilder info = new StringBuilder().append("Storage Account: ").append(storageAccount.id()) .append("Name: ").append(storageAccount.name()) .append("\n\tResource group: ").append(storageAccount.resourceGroupName()) .append("\n\tRegion: ").append(storageAccount.region()) .append("\n\tSKU: ").append(storageAccount.skuType().name().toString()) .append("\n\tAccessTier: ").append(storageAccount.accessTier()) .append("\n\tKind: ").append(storageAccount.kind()); info.append("\n\tNetwork Rule Configuration: ") .append("\n\t\tAllow reading logs from any network: ").append(storageAccount.canReadLogEntriesFromAnyNetwork()) .append("\n\t\tAllow reading metrics from any network: ").append(storageAccount.canReadMetricsFromAnyNetwork()) .append("\n\t\tAllow access from all azure services: ").append(storageAccount.canAccessFromAzureServices()); if (storageAccount.networkSubnetsWithAccess().size() > 0) { info.append("\n\t\tNetwork subnets with access: "); for (String subnetId : storageAccount.networkSubnetsWithAccess()) { info.append("\n\t\t\t").append(subnetId); } } if (storageAccount.ipAddressesWithAccess().size() > 0) { info.append("\n\t\tIP addresses with access: "); for (String ipAddress : storageAccount.ipAddressesWithAccess()) { info.append("\n\t\t\t").append(ipAddress); } } if (storageAccount.ipAddressRangesWithAccess().size() > 0) { info.append("\n\t\tIP address-ranges with access: "); for (String ipAddressRange : storageAccount.ipAddressRangesWithAccess()) { 
info.append("\n\t\t\t").append(ipAddressRange); } } info.append("\n\t\tTraffic allowed from only HTTPS: ").append(storageAccount.innerModel().enableHttpsTrafficOnly()); info.append("\n\tEncryption status: "); info.append("\n\t\tInfrastructure Encryption: ").append(storageAccount.infrastructureEncryptionEnabled() ? "Enabled" : "Disabled"); for (Map.Entry<StorageService, StorageAccountEncryptionStatus> eStatus : storageAccount.encryptionStatuses().entrySet()) { info.append("\n\t\t").append(eStatus.getValue().storageService()).append(": ").append(eStatus.getValue().isEnabled() ? "Enabled" : "Disabled"); } System.out.println(info.toString()); } /** * Print storage account keys. * * @param storageAccountKeys a list of storage account keys */ public static void print(List<StorageAccountKey> storageAccountKeys) { for (int i = 0; i < storageAccountKeys.size(); i++) { StorageAccountKey storageAccountKey = storageAccountKeys.get(i); System.out.println("Key (" + i + ") " + storageAccountKey.keyName() + "=" + storageAccountKey.value()); } } /** * Print Redis Cache. * * @param redisCache a Redis cache. 
*/ public static void print(RedisCache redisCache) { StringBuilder redisInfo = new StringBuilder() .append("Redis Cache Name: ").append(redisCache.name()) .append("\n\tResource group: ").append(redisCache.resourceGroupName()) .append("\n\tRegion: ").append(redisCache.region()) .append("\n\tSKU Name: ").append(redisCache.sku().name()) .append("\n\tSKU Family: ").append(redisCache.sku().family()) .append("\n\tHostname: ").append(redisCache.hostname()) .append("\n\tSSL port: ").append(redisCache.sslPort()) .append("\n\tNon-SSL port (6379) enabled: ").append(redisCache.nonSslPort()); if (redisCache.redisConfiguration() != null && !redisCache.redisConfiguration().isEmpty()) { redisInfo.append("\n\tRedis Configuration:"); for (Map.Entry<String, String> redisConfiguration : redisCache.redisConfiguration().entrySet()) { redisInfo.append("\n\t '").append(redisConfiguration.getKey()) .append("' : '").append(redisConfiguration.getValue()).append("'"); } } if (redisCache.isPremium()) { RedisCachePremium premium = redisCache.asPremium(); List<ScheduleEntry> scheduleEntries = premium.listPatchSchedules(); if (scheduleEntries != null && !scheduleEntries.isEmpty()) { redisInfo.append("\n\tRedis Patch Schedule:"); for (ScheduleEntry schedule : scheduleEntries) { redisInfo.append("\n\t\tDay: '").append(schedule.dayOfWeek()) .append("', start at: '").append(schedule.startHourUtc()) .append("', maintenance window: '").append(schedule.maintenanceWindow()) .append("'"); } } } System.out.println(redisInfo.toString()); } /** * Print Redis Cache access keys. * * @param redisAccessKeys a keys for Redis Cache */ public static void print(RedisAccessKeys redisAccessKeys) { StringBuilder redisKeys = new StringBuilder() .append("Redis Access Keys: ") .append("\n\tPrimary Key: '").append(redisAccessKeys.primaryKey()).append("', ") .append("\n\tSecondary Key: '").append(redisAccessKeys.secondaryKey()).append("', "); System.out.println(redisKeys.toString()); } /** * Print management lock. 
*
     * @param lock a management lock
     */
    public static void print(ManagementLock lock) {
        StringBuilder info = new StringBuilder();
        info.append("\nLock ID: ").append(lock.id())
            .append("\nLocked resource ID: ").append(lock.lockedResourceId())
            .append("\nLevel: ").append(lock.level());
        System.out.println(info.toString());
    }

    /**
     * Print load balancer.
     *
     * @param resource a load balancer
     */
    public static void print(LoadBalancer resource) {
        StringBuilder info = new StringBuilder();
        // NOTE(review): "Name: " lacks the "\n\t" prefix the sibling lines use,
        // so it runs into the id on the same output line — confirm intended.
        info.append("Load balancer: ").append(resource.id())
            .append("Name: ").append(resource.name())
            .append("\n\tResource group: ").append(resource.resourceGroupName())
            .append("\n\tRegion: ").append(resource.region())
            .append("\n\tTags: ").append(resource.tags())
            .append("\n\tBackends: ").append(resource.backends().keySet().toString());

        // Public IP addresses referenced by this load balancer.
        info.append("\n\tPublic IP address IDs: ")
            .append(resource.publicIpAddressIds().size());
        for (String pipId : resource.publicIpAddressIds()) {
            info.append("\n\t\tPIP id: ").append(pipId);
        }

        // TCP probes plus the load-balancing rules that reference each probe.
        info.append("\n\tTCP probes: ")
            .append(resource.tcpProbes().size());
        for (LoadBalancerTcpProbe probe : resource.tcpProbes().values()) {
            info.append("\n\t\tProbe name: ").append(probe.name())
                .append("\n\t\t\tPort: ").append(probe.port())
                .append("\n\t\t\tInterval in seconds: ").append(probe.intervalInSeconds())
                .append("\n\t\t\tRetries before unhealthy: ").append(probe.numberOfProbes());
            info.append("\n\t\t\tReferenced from load balancing rules: ")
                .append(probe.loadBalancingRules().size());
            for (LoadBalancingRule rule : probe.loadBalancingRules().values()) {
                info.append("\n\t\t\t\tName: ").append(rule.name());
            }
        }

        // HTTP probes.
        info.append("\n\tHTTP probes: ")
            .append(resource.httpProbes().size());
        for (LoadBalancerHttpProbe probe : resource.httpProbes().values()) {
            info.append("\n\t\tProbe name: ").append(probe.name())
                .append("\n\t\t\tPort: ").append(probe.port())
                .append("\n\t\t\tInterval in seconds: ").append(probe.intervalInSeconds())
                .append("\n\t\t\tRetries before unhealthy: ").append(probe.numberOfProbes())
                .append("\n\t\t\tHTTP request path: ").append(probe.requestPath());
            info.append("\n\t\t\tReferenced from load balancing rules: ")
                .append(probe.loadBalancingRules().size());
            for (LoadBalancingRule rule : probe.loadBalancingRules().values()) {
                info.append("\n\t\t\t\tName: ").append(rule.name());
            }
        }

        // HTTPS probes.
        info.append("\n\tHTTPS probes: ")
            .append(resource.httpsProbes().size());
        for (LoadBalancerHttpProbe probe : resource.httpsProbes().values()) {
            info.append("\n\t\tProbe name: ").append(probe.name())
                .append("\n\t\t\tPort: ").append(probe.port())
                .append("\n\t\t\tInterval in seconds: ").append(probe.intervalInSeconds())
                .append("\n\t\t\tRetries before unhealthy: ").append(probe.numberOfProbes())
                .append("\n\t\t\tHTTPS request path: ").append(probe.requestPath());
            info.append("\n\t\t\tReferenced from load balancing rules: ")
                .append(probe.loadBalancingRules().size());
            for (LoadBalancingRule rule : probe.loadBalancingRules().values()) {
                info.append("\n\t\t\t\tName: ").append(rule.name());
            }
        }

        // Load-balancing rules; frontend/backend/probe may be absent, hence the null checks.
        info.append("\n\tLoad balancing rules: ")
            .append(resource.loadBalancingRules().size());
        for (LoadBalancingRule rule : resource.loadBalancingRules().values()) {
            info.append("\n\t\tLB rule name: ").append(rule.name())
                .append("\n\t\t\tProtocol: ").append(rule.protocol())
                .append("\n\t\t\tFloating IP enabled? ").append(rule.floatingIPEnabled())
                .append("\n\t\t\tIdle timeout in minutes: ").append(rule.idleTimeoutInMinutes())
                .append("\n\t\t\tLoad distribution method: ").append(rule.loadDistribution().toString());

            LoadBalancerFrontend frontend = rule.frontend();
            info.append("\n\t\t\tFrontend: ");
            if (frontend != null) {
                info.append(frontend.name());
            } else {
                info.append("(None)");
            }

            info.append("\n\t\t\tFrontend port: ").append(rule.frontendPort());

            LoadBalancerBackend backend = rule.backend();
            info.append("\n\t\t\tBackend: ");
            if (backend != null) {
                info.append(backend.name());
            } else {
                info.append("(None)");
            }

            info.append("\n\t\t\tBackend port: ").append(rule.backendPort());

            LoadBalancerProbe probe = rule.probe();
            info.append("\n\t\t\tProbe: ");
            if (probe == null) {
                info.append("(None)");
            } else {
                info.append(probe.name()).append(" [").append(probe.protocol().toString()).append("]");
            }
        }

        // Frontends; the printed detail depends on whether the frontend is public or private.
        info.append("\n\tFrontends: ")
            .append(resource.frontends().size());
        for (LoadBalancerFrontend frontend : resource.frontends().values()) {
            info.append("\n\t\tFrontend name: ").append(frontend.name())
                .append("\n\t\t\tInternet facing: ").append(frontend.isPublic());
            if (frontend.isPublic()) {
                info.append("\n\t\t\tPublic IP Address ID: ").append(((LoadBalancerPublicFrontend) frontend).publicIpAddressId());
            } else {
                info.append("\n\t\t\tVirtual network ID: ").append(((LoadBalancerPrivateFrontend) frontend).networkId())
                    .append("\n\t\t\tSubnet name: ").append(((LoadBalancerPrivateFrontend) frontend).subnetName())
                    .append("\n\t\t\tPrivate IP address: ").append(((LoadBalancerPrivateFrontend) frontend).privateIpAddress())
                    .append("\n\t\t\tPrivate IP allocation method: ").append(((LoadBalancerPrivateFrontend) frontend).privateIpAllocationMethod());
            }
            info.append("\n\t\t\tReferenced inbound NAT pools: ")
                .append(frontend.inboundNatPools().size());
            for (LoadBalancerInboundNatPool pool : frontend.inboundNatPools().values()) {
                info.append("\n\t\t\t\tName: ").append(pool.name());
            }
            info.append("\n\t\t\tReferenced inbound NAT rules: ")
                .append(frontend.inboundNatRules().size());
            for (LoadBalancerInboundNatRule rule : frontend.inboundNatRules().values()) {
                info.append("\n\t\t\t\tName: ").append(rule.name());
            }
            info.append("\n\t\t\tReferenced load balancing rules: ")
                .append(frontend.loadBalancingRules().size());
            for (LoadBalancingRule rule : frontend.loadBalancingRules().values()) {
                info.append("\n\t\t\t\tName: ").append(rule.name());
            }
        }

        // Inbound NAT rules.
        info.append("\n\tInbound NAT rules: ")
            .append(resource.inboundNatRules().size());
        for (LoadBalancerInboundNatRule natRule : resource.inboundNatRules().values()) {
            info.append("\n\t\tInbound NAT rule name: ").append(natRule.name())
                .append("\n\t\t\tProtocol: ").append(natRule.protocol().toString())
                .append("\n\t\t\tFrontend: ").append(natRule.frontend().name())
                .append("\n\t\t\tFrontend port: ").append(natRule.frontendPort())
                .append("\n\t\t\tBackend port: ").append(natRule.backendPort())
                .append("\n\t\t\tBackend NIC ID: ").append(natRule.backendNetworkInterfaceId())
                .append("\n\t\t\tBackend NIC IP config name: ").append(natRule.backendNicIpConfigurationName())
                .append("\n\t\t\tFloating IP? ").append(natRule.floatingIPEnabled())
                .append("\n\t\t\tIdle timeout in minutes: ").append(natRule.idleTimeoutInMinutes());
        }

        // Inbound NAT pools.
        info.append("\n\tInbound NAT pools: ")
            .append(resource.inboundNatPools().size());
        for (LoadBalancerInboundNatPool natPool : resource.inboundNatPools().values()) {
            info.append("\n\t\tInbound NAT pool name: ").append(natPool.name())
                .append("\n\t\t\tProtocol: ").append(natPool.protocol().toString())
                .append("\n\t\t\tFrontend: ").append(natPool.frontend().name())
                .append("\n\t\t\tFrontend port range: ")
                .append(natPool.frontendPortRangeStart())
                .append("-")
                .append(natPool.frontendPortRangeEnd())
                .append("\n\t\t\tBackend port: ").append(natPool.backendPort());
        }

        // Backends, including the NICs, VMs and rules each backend references.
        info.append("\n\tBackends: ")
            .append(resource.backends().size());
        for (LoadBalancerBackend backend : resource.backends().values()) {
            info.append("\n\t\tBackend name: ").append(backend.name());
            info.append("\n\t\t\tReferenced NICs: ")
                .append(backend.backendNicIPConfigurationNames().entrySet().size());
            for (Map.Entry<String, String> entry : backend.backendNicIPConfigurationNames().entrySet()) {
                info.append("\n\t\t\t\tNIC ID: ").append(entry.getKey())
                    .append(" - IP Config: ").append(entry.getValue());
            }
            Set<String> vmIds = backend.getVirtualMachineIds();
            info.append("\n\t\t\tReferenced virtual machine ids: ")
                .append(vmIds.size());
            for (String vmId : vmIds) {
                info.append("\n\t\t\t\tVM ID: ").append(vmId);
            }
            info.append("\n\t\t\tReferenced load balancing rules: ")
                .append(new ArrayList<String>(backend.loadBalancingRules().keySet()));
        }
        System.out.println(info.toString());
    }

    /**
     * Print app service domain.
*
     * @param resource an app service domain
     */
    public static void print(AppServiceDomain resource) {
        // NOTE(review): "Name: " lacks the "\n\t" prefix the sibling lines use — confirm intended.
        StringBuilder builder = new StringBuilder().append("Domain: ").append(resource.id())
            .append("Name: ").append(resource.name())
            .append("\n\tResource group: ").append(resource.resourceGroupName())
            .append("\n\tRegion: ").append(resource.region())
            .append("\n\tCreated time: ").append(resource.createdTime())
            .append("\n\tExpiration time: ").append(resource.expirationTime())
            .append("\n\tContact: ");
        // Registrant contact may be withheld, in which case it prints as "Private".
        Contact contact = resource.registrantContact();
        if (contact == null) {
            builder = builder.append("Private");
        } else {
            builder = builder.append("\n\t\tName: ").append(contact.nameFirst() + " " + contact.nameLast());
        }
        builder = builder.append("\n\tName servers: ");
        for (String nameServer : resource.nameServers()) {
            builder = builder.append("\n\t\t" + nameServer);
        }
        System.out.println(builder.toString());
    }

    /**
     * Print app service certificate order.
     *
     * @param resource an app service certificate order
     */
    public static void print(AppServiceCertificateOrder resource) {
        StringBuilder builder = new StringBuilder().append("App service certificate order: ").append(resource.id())
            .append("Name: ").append(resource.name())
            .append("\n\tResource group: ").append(resource.resourceGroupName())
            .append("\n\tRegion: ").append(resource.region())
            .append("\n\tDistinguished name: ").append(resource.distinguishedName())
            .append("\n\tProduct type: ").append(resource.productType())
            .append("\n\tValid years: ").append(resource.validityInYears())
            .append("\n\tStatus: ").append(resource.status())
            // The signed certificate is absent until the order is issued; guard against null.
            .append("\n\tIssuance time: ").append(resource.lastCertificateIssuanceTime())
            .append("\n\tSigned certificate: ").append(resource.signedCertificate() == null
                ? null : resource.signedCertificate().thumbprint());
        System.out.println(builder.toString());
    }

    /**
     * Print app service plan.
* * @param resource an app service plan */ public static void print(AppServicePlan resource) { StringBuilder builder = new StringBuilder().append("App service certificate order: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tPricing tier: ").append(resource.pricingTier()); System.out.println(builder.toString()); } /** * Print a web app. * * @param resource a web app */ public static void print(WebAppBase resource) { StringBuilder builder = new StringBuilder().append("Web app: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tState: ").append(resource.state()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tDefault hostname: ").append(resource.defaultHostname()) .append("\n\tApp service plan: ").append(resource.appServicePlanId()) .append("\n\tHost name bindings: "); for (HostnameBinding binding : resource.getHostnameBindings().values()) { builder = builder.append("\n\t\t" + binding.toString()); } builder = builder.append("\n\tSSL bindings: "); for (HostnameSslState binding : resource.hostnameSslStates().values()) { builder = builder.append("\n\t\t" + binding.name() + ": " + binding.sslState()); if (binding.sslState() != null && binding.sslState() != SslState.DISABLED) { builder = builder.append(" - " + binding.thumbprint()); } } builder = builder.append("\n\tApp settings: "); for (AppSetting setting : resource.getAppSettings().values()) { builder = builder.append("\n\t\t" + setting.key() + ": " + setting.value() + (setting.sticky() ? " - slot setting" : "")); } builder = builder.append("\n\tConnection strings: "); for (ConnectionString conn : resource.getConnectionStrings().values()) { builder = builder.append("\n\t\t" + conn.name() + ": " + conn.value() + " - " + conn.type() + (conn.sticky() ? 
" - slot setting" : "")); } System.out.println(builder.toString()); } /** * Print a web site. * * @param resource a web site */ public static void print(WebSiteBase resource) { StringBuilder builder = new StringBuilder().append("Web app: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tState: ").append(resource.state()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tDefault hostname: ").append(resource.defaultHostname()) .append("\n\tApp service plan: ").append(resource.appServicePlanId()); builder = builder.append("\n\tSSL bindings: "); for (HostnameSslState binding : resource.hostnameSslStates().values()) { builder = builder.append("\n\t\t" + binding.name() + ": " + binding.sslState()); if (binding.sslState() != null && binding.sslState() != SslState.DISABLED) { builder = builder.append(" - " + binding.thumbprint()); } } System.out.println(builder.toString()); } /** * Print a traffic manager profile. 
* * @param profile a traffic manager profile */ public static void print(TrafficManagerProfile profile) { StringBuilder info = new StringBuilder(); info.append("Traffic Manager Profile: ").append(profile.id()) .append("\n\tName: ").append(profile.name()) .append("\n\tResource group: ").append(profile.resourceGroupName()) .append("\n\tRegion: ").append(profile.regionName()) .append("\n\tTags: ").append(profile.tags()) .append("\n\tDNSLabel: ").append(profile.dnsLabel()) .append("\n\tFQDN: ").append(profile.fqdn()) .append("\n\tTTL: ").append(profile.timeToLive()) .append("\n\tEnabled: ").append(profile.isEnabled()) .append("\n\tRoutingMethod: ").append(profile.trafficRoutingMethod()) .append("\n\tMonitor status: ").append(profile.monitorStatus()) .append("\n\tMonitoring port: ").append(profile.monitoringPort()) .append("\n\tMonitoring path: ").append(profile.monitoringPath()); Map<String, TrafficManagerAzureEndpoint> azureEndpoints = profile.azureEndpoints(); if (!azureEndpoints.isEmpty()) { info.append("\n\tAzure endpoints:"); int idx = 1; for (TrafficManagerAzureEndpoint endpoint : azureEndpoints.values()) { info.append("\n\t\tAzure endpoint: .append("\n\t\t\tId: ").append(endpoint.id()) .append("\n\t\t\tType: ").append(endpoint.endpointType()) .append("\n\t\t\tTarget resourceId: ").append(endpoint.targetAzureResourceId()) .append("\n\t\t\tTarget resourceType: ").append(endpoint.targetResourceType()) .append("\n\t\t\tMonitor status: ").append(endpoint.monitorStatus()) .append("\n\t\t\tEnabled: ").append(endpoint.isEnabled()) .append("\n\t\t\tRouting priority: ").append(endpoint.routingPriority()) .append("\n\t\t\tRouting weight: ").append(endpoint.routingWeight()); } } Map<String, TrafficManagerExternalEndpoint> externalEndpoints = profile.externalEndpoints(); if (!externalEndpoints.isEmpty()) { info.append("\n\tExternal endpoints:"); int idx = 1; for (TrafficManagerExternalEndpoint endpoint : externalEndpoints.values()) { info.append("\n\t\tExternal endpoint: 
.append("\n\t\t\tId: ").append(endpoint.id()) .append("\n\t\t\tType: ").append(endpoint.endpointType()) .append("\n\t\t\tFQDN: ").append(endpoint.fqdn()) .append("\n\t\t\tSource Traffic Location: ").append(endpoint.sourceTrafficLocation()) .append("\n\t\t\tMonitor status: ").append(endpoint.monitorStatus()) .append("\n\t\t\tEnabled: ").append(endpoint.isEnabled()) .append("\n\t\t\tRouting priority: ").append(endpoint.routingPriority()) .append("\n\t\t\tRouting weight: ").append(endpoint.routingWeight()); } } Map<String, TrafficManagerNestedProfileEndpoint> nestedProfileEndpoints = profile.nestedProfileEndpoints(); if (!nestedProfileEndpoints.isEmpty()) { info.append("\n\tNested profile endpoints:"); int idx = 1; for (TrafficManagerNestedProfileEndpoint endpoint : nestedProfileEndpoints.values()) { info.append("\n\t\tNested profile endpoint: .append("\n\t\t\tId: ").append(endpoint.id()) .append("\n\t\t\tType: ").append(endpoint.endpointType()) .append("\n\t\t\tNested profileId: ").append(endpoint.nestedProfileId()) .append("\n\t\t\tMinimum child threshold: ").append(endpoint.minimumChildEndpointCount()) .append("\n\t\t\tSource Traffic Location: ").append(endpoint.sourceTrafficLocation()) .append("\n\t\t\tMonitor status: ").append(endpoint.monitorStatus()) .append("\n\t\t\tEnabled: ").append(endpoint.isEnabled()) .append("\n\t\t\tRouting priority: ").append(endpoint.routingPriority()) .append("\n\t\t\tRouting weight: ").append(endpoint.routingWeight()); } } System.out.println(info.toString()); } /** * Print a dns zone. 
*
     * @param dnsZone a dns zone
     */
    public static void print(DnsZone dnsZone) {
        StringBuilder info = new StringBuilder();
        info.append("DNS Zone: ").append(dnsZone.id())
            .append("\n\tName (Top level domain): ").append(dnsZone.name())
            .append("\n\tResource group: ").append(dnsZone.resourceGroupName())
            .append("\n\tRegion: ").append(dnsZone.regionName())
            .append("\n\tTags: ").append(dnsZone.tags())
            .append("\n\tName servers:");
        for (String nameServer : dnsZone.nameServers()) {
            info.append("\n\t\t").append(nameServer);
        }
        // SOA record of the zone.
        SoaRecordSet soaRecordSet = dnsZone.getSoaRecordSet();
        SoaRecord soaRecord = soaRecordSet.record();
        info.append("\n\tSOA Record:")
            .append("\n\t\tHost:").append(soaRecord.host())
            .append("\n\t\tEmail:").append(soaRecord.email())
            .append("\n\t\tExpire time (seconds):").append(soaRecord.expireTime())
            .append("\n\t\tRefresh time (seconds):").append(soaRecord.refreshTime())
            .append("\n\t\tRetry time (seconds):").append(soaRecord.retryTime())
            .append("\n\t\tNegative response cache ttl (seconds):").append(soaRecord.minimumTtl())
            .append("\n\t\tTTL (seconds):").append(soaRecordSet.timeToLive());
        // Enumerate every record-set type of the zone in turn.
        PagedIterable<ARecordSet> aRecordSets = dnsZone.aRecordSets().list();
        info.append("\n\tA Record sets:");
        for (ARecordSet aRecordSet : aRecordSets) {
            info.append("\n\t\tId: ").append(aRecordSet.id())
                .append("\n\t\tName: ").append(aRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(aRecordSet.timeToLive())
                .append("\n\t\tIP v4 addresses: ");
            for (String ipAddress : aRecordSet.ipv4Addresses()) {
                info.append("\n\t\t\t").append(ipAddress);
            }
        }
        PagedIterable<AaaaRecordSet> aaaaRecordSets = dnsZone.aaaaRecordSets().list();
        info.append("\n\tAAAA Record sets:");
        for (AaaaRecordSet aaaaRecordSet : aaaaRecordSets) {
            info.append("\n\t\tId: ").append(aaaaRecordSet.id())
                .append("\n\t\tName: ").append(aaaaRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(aaaaRecordSet.timeToLive())
                .append("\n\t\tIP v6 addresses: ");
            for (String ipAddress : aaaaRecordSet.ipv6Addresses()) {
                info.append("\n\t\t\t").append(ipAddress);
            }
        }
        PagedIterable<CnameRecordSet> cnameRecordSets = dnsZone.cNameRecordSets().list();
        info.append("\n\tCNAME Record sets:");
        for (CnameRecordSet cnameRecordSet : cnameRecordSets) {
            info.append("\n\t\tId: ").append(cnameRecordSet.id())
                .append("\n\t\tName: ").append(cnameRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(cnameRecordSet.timeToLive())
                .append("\n\t\tCanonical name: ").append(cnameRecordSet.canonicalName());
        }
        PagedIterable<MxRecordSet> mxRecordSets = dnsZone.mxRecordSets().list();
        info.append("\n\tMX Record sets:");
        for (MxRecordSet mxRecordSet : mxRecordSets) {
            info.append("\n\t\tId: ").append(mxRecordSet.id())
                .append("\n\t\tName: ").append(mxRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(mxRecordSet.timeToLive())
                .append("\n\t\tRecords: ");
            for (MxRecord mxRecord : mxRecordSet.records()) {
                info.append("\n\t\t\tExchange server, Preference: ")
                    .append(mxRecord.exchange())
                    .append(" ")
                    .append(mxRecord.preference());
            }
        }
        PagedIterable<NsRecordSet> nsRecordSets = dnsZone.nsRecordSets().list();
        info.append("\n\tNS Record sets:");
        for (NsRecordSet nsRecordSet : nsRecordSets) {
            info.append("\n\t\tId: ").append(nsRecordSet.id())
                .append("\n\t\tName: ").append(nsRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(nsRecordSet.timeToLive())
                .append("\n\t\tName servers: ");
            for (String nameServer : nsRecordSet.nameServers()) {
                info.append("\n\t\t\t").append(nameServer);
            }
        }
        PagedIterable<PtrRecordSet> ptrRecordSets = dnsZone.ptrRecordSets().list();
        info.append("\n\tPTR Record sets:");
        for (PtrRecordSet ptrRecordSet : ptrRecordSets) {
            info.append("\n\t\tId: ").append(ptrRecordSet.id())
                .append("\n\t\tName: ").append(ptrRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(ptrRecordSet.timeToLive())
                .append("\n\t\tTarget domain names: ");
            for (String domainNames : ptrRecordSet.targetDomainNames()) {
                info.append("\n\t\t\t").append(domainNames);
            }
        }
        PagedIterable<SrvRecordSet> srvRecordSets = dnsZone.srvRecordSets().list();
        info.append("\n\tSRV Record sets:");
        for (SrvRecordSet srvRecordSet : srvRecordSets) {
            info.append("\n\t\tId: ").append(srvRecordSet.id())
                .append("\n\t\tName: ").append(srvRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(srvRecordSet.timeToLive())
                .append("\n\t\tRecords: ");
            for (SrvRecord srvRecord : srvRecordSet.records()) {
                info.append("\n\t\t\tTarget, Port, Priority, Weight: ")
                    .append(srvRecord.target())
                    .append(", ")
                    .append(srvRecord.port())
                    .append(", ")
                    .append(srvRecord.priority())
                    .append(", ")
                    .append(srvRecord.weight());
            }
        }
        PagedIterable<TxtRecordSet> txtRecordSets = dnsZone.txtRecordSets().list();
        info.append("\n\tTXT Record sets:");
        for (TxtRecordSet txtRecordSet : txtRecordSets) {
            info.append("\n\t\tId: ").append(txtRecordSet.id())
                .append("\n\t\tName: ").append(txtRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(txtRecordSet.timeToLive())
                .append("\n\t\tRecords: ");
            // Only the first value of each TXT record is printed.
            for (TxtRecord txtRecord : txtRecordSet.records()) {
                if (txtRecord.value().size() > 0) {
                    info.append("\n\t\t\tValue: ").append(txtRecord.value().get(0));
                }
            }
        }
        System.out.println(info.toString());
    }

    /**
     * Print a private dns zone.
*
     * @param privateDnsZone a private dns zone
     */
    public static void print(PrivateDnsZone privateDnsZone) {
        StringBuilder info = new StringBuilder();
        // NOTE(review): a "Name servers:" header is printed but no name-server
        // values follow (the method goes straight to the SOA record) — confirm intended.
        info.append("Private DNS Zone: ").append(privateDnsZone.id())
            .append("\n\tName (Top level domain): ").append(privateDnsZone.name())
            .append("\n\tResource group: ").append(privateDnsZone.resourceGroupName())
            .append("\n\tRegion: ").append(privateDnsZone.regionName())
            .append("\n\tTags: ").append(privateDnsZone.tags())
            .append("\n\tName servers:");
        // SOA record of the zone.
        com.azure.resourcemanager.privatedns.models.SoaRecordSet soaRecordSet = privateDnsZone.getSoaRecordSet();
        com.azure.resourcemanager.privatedns.models.SoaRecord soaRecord = soaRecordSet.record();
        info.append("\n\tSOA Record:")
            .append("\n\t\tHost:").append(soaRecord.host())
            .append("\n\t\tEmail:").append(soaRecord.email())
            .append("\n\t\tExpire time (seconds):").append(soaRecord.expireTime())
            .append("\n\t\tRefresh time (seconds):").append(soaRecord.refreshTime())
            .append("\n\t\tRetry time (seconds):").append(soaRecord.retryTime())
            .append("\n\t\tNegative response cache ttl (seconds):").append(soaRecord.minimumTtl())
            .append("\n\t\tTTL (seconds):").append(soaRecordSet.timeToLive());
        // Enumerate every record-set type of the zone in turn.
        PagedIterable<com.azure.resourcemanager.privatedns.models.ARecordSet> aRecordSets = privateDnsZone
            .aRecordSets().list();
        info.append("\n\tA Record sets:");
        for (com.azure.resourcemanager.privatedns.models.ARecordSet aRecordSet : aRecordSets) {
            info.append("\n\t\tId: ").append(aRecordSet.id())
                .append("\n\t\tName: ").append(aRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(aRecordSet.timeToLive())
                .append("\n\t\tIP v4 addresses: ");
            for (String ipAddress : aRecordSet.ipv4Addresses()) {
                info.append("\n\t\t\t").append(ipAddress);
            }
        }
        PagedIterable<com.azure.resourcemanager.privatedns.models.AaaaRecordSet> aaaaRecordSets = privateDnsZone
            .aaaaRecordSets().list();
        info.append("\n\tAAAA Record sets:");
        for (com.azure.resourcemanager.privatedns.models.AaaaRecordSet aaaaRecordSet : aaaaRecordSets) {
            info.append("\n\t\tId: ").append(aaaaRecordSet.id())
                .append("\n\t\tName: ").append(aaaaRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(aaaaRecordSet.timeToLive())
                .append("\n\t\tIP v6 addresses: ");
            for (String ipAddress : aaaaRecordSet.ipv6Addresses()) {
                info.append("\n\t\t\t").append(ipAddress);
            }
        }
        PagedIterable<com.azure.resourcemanager.privatedns.models.CnameRecordSet> cnameRecordSets =
            privateDnsZone.cnameRecordSets().list();
        info.append("\n\tCNAME Record sets:");
        for (com.azure.resourcemanager.privatedns.models.CnameRecordSet cnameRecordSet : cnameRecordSets) {
            info.append("\n\t\tId: ").append(cnameRecordSet.id())
                .append("\n\t\tName: ").append(cnameRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(cnameRecordSet.timeToLive())
                .append("\n\t\tCanonical name: ").append(cnameRecordSet.canonicalName());
        }
        PagedIterable<com.azure.resourcemanager.privatedns.models.MxRecordSet> mxRecordSets =
            privateDnsZone.mxRecordSets().list();
        info.append("\n\tMX Record sets:");
        for (com.azure.resourcemanager.privatedns.models.MxRecordSet mxRecordSet : mxRecordSets) {
            info.append("\n\t\tId: ").append(mxRecordSet.id())
                .append("\n\t\tName: ").append(mxRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(mxRecordSet.timeToLive())
                .append("\n\t\tRecords: ");
            for (com.azure.resourcemanager.privatedns.models.MxRecord mxRecord : mxRecordSet.records()) {
                info.append("\n\t\t\tExchange server, Preference: ")
                    .append(mxRecord.exchange())
                    .append(" ")
                    .append(mxRecord.preference());
            }
        }
        PagedIterable<com.azure.resourcemanager.privatedns.models.PtrRecordSet> ptrRecordSets = privateDnsZone
            .ptrRecordSets().list();
        info.append("\n\tPTR Record sets:");
        for (com.azure.resourcemanager.privatedns.models.PtrRecordSet ptrRecordSet : ptrRecordSets) {
            info.append("\n\t\tId: ").append(ptrRecordSet.id())
                .append("\n\t\tName: ").append(ptrRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(ptrRecordSet.timeToLive())
                .append("\n\t\tTarget domain names: ");
            for (String domainNames : ptrRecordSet.targetDomainNames()) {
                info.append("\n\t\t\t").append(domainNames);
            }
        }
        PagedIterable<com.azure.resourcemanager.privatedns.models.SrvRecordSet> srvRecordSets = privateDnsZone
            .srvRecordSets().list();
        info.append("\n\tSRV Record sets:");
        for (com.azure.resourcemanager.privatedns.models.SrvRecordSet srvRecordSet : srvRecordSets) {
            info.append("\n\t\tId: ").append(srvRecordSet.id())
                .append("\n\t\tName: ").append(srvRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(srvRecordSet.timeToLive())
                .append("\n\t\tRecords: ");
            for (com.azure.resourcemanager.privatedns.models.SrvRecord srvRecord : srvRecordSet.records()) {
                info.append("\n\t\t\tTarget, Port, Priority, Weight: ")
                    .append(srvRecord.target())
                    .append(", ")
                    .append(srvRecord.port())
                    .append(", ")
                    .append(srvRecord.priority())
                    .append(", ")
                    .append(srvRecord.weight());
            }
        }
        PagedIterable<com.azure.resourcemanager.privatedns.models.TxtRecordSet> txtRecordSets = privateDnsZone
            .txtRecordSets().list();
        info.append("\n\tTXT Record sets:");
        for (com.azure.resourcemanager.privatedns.models.TxtRecordSet txtRecordSet : txtRecordSets) {
            info.append("\n\t\tId: ").append(txtRecordSet.id())
                .append("\n\t\tName: ").append(txtRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(txtRecordSet.timeToLive())
                .append("\n\t\tRecords: ");
            // Only the first value of each TXT record is printed.
            for (com.azure.resourcemanager.privatedns.models.TxtRecord txtRecord : txtRecordSet.records()) {
                if (txtRecord.value().size() > 0) {
                    info.append("\n\t\t\tValue: ").append(txtRecord.value().get(0));
                }
            }
        }
        // Virtual networks linked to the private zone.
        PagedIterable<VirtualNetworkLink> virtualNetworkLinks = privateDnsZone.virtualNetworkLinks().list();
        info.append("\n\tVirtual Network Links:");
        for (VirtualNetworkLink virtualNetworkLink : virtualNetworkLinks) {
            info.append("\n\tId: ").append(virtualNetworkLink.id())
                .append("\n\tName: ").append(virtualNetworkLink.name())
                .append("\n\tReference of Virtual Network: ").append(virtualNetworkLink.referencedVirtualNetworkId())
                .append("\n\tRegistration enabled: ").append(virtualNetworkLink.isAutoRegistrationEnabled());
        }
        System.out.println(info.toString());
    }

    /**
     * Print an Azure Container Registry.
     *
     * @param azureRegistry an Azure Container Registry
     */
    public static void print(Registry azureRegistry) {
        StringBuilder info = new StringBuilder();
        RegistryCredentials acrCredentials = azureRegistry.getCredentials();
        info.append("Azure Container Registry: ").append(azureRegistry.id())
            .append("\n\tName: ").append(azureRegistry.name())
            .append("\n\tServer Url: ").append(azureRegistry.loginServerUrl())
            .append("\n\tUser: ").append(acrCredentials.username())
            .append("\n\tFirst Password: ").append(acrCredentials.accessKeys().get(AccessKeyType.PRIMARY))
            .append("\n\tSecond Password: ").append(acrCredentials.accessKeys().get(AccessKeyType.SECONDARY));
        System.out.println(info.toString());
    }

    /**
     * Print an Azure Container Service (AKS).
     *
     * @param kubernetesCluster a managed container service
     */
    public static void print(KubernetesCluster kubernetesCluster) {
        StringBuilder info = new StringBuilder();
        // Only the first agent pool of the cluster is summarized here.
        info.append("Azure Container Service: ").append(kubernetesCluster.id())
            .append("\n\tName: ").append(kubernetesCluster.name())
            .append("\n\tFQDN: ").append(kubernetesCluster.fqdn())
            .append("\n\tDNS prefix label: ").append(kubernetesCluster.dnsPrefix())
            .append("\n\t\tWith Agent pool name: ").append(new ArrayList<>(kubernetesCluster.agentPools().keySet()).get(0))
            .append("\n\t\tAgent pool count: ").append(new ArrayList<>(kubernetesCluster.agentPools().values()).get(0).count())
            .append("\n\t\tAgent pool VM size: ").append(new ArrayList<>(kubernetesCluster.agentPools().values()).get(0).vmSize().toString())
            .append("\n\tLinux user name: ").append(kubernetesCluster.linuxRootUsername())
            .append("\n\tSSH key: ").append(kubernetesCluster.sshKey())
            .append("\n\tService principal client ID: ").append(kubernetesCluster.servicePrincipalClientId());
        System.out.println(info.toString());
    }

    /**
     * Print an Azure Search Service.
*
     * @param searchService an Azure Search Service
     */
    public static void print(SearchService searchService) {
        StringBuilder info = new StringBuilder();
        AdminKeys adminKeys = searchService.getAdminKeys();
        PagedIterable<QueryKey> queryKeys = searchService.listQueryKeys();
        info.append("Azure Search: ").append(searchService.id())
            .append("\n\tResource group: ").append(searchService.resourceGroupName())
            .append("\n\tRegion: ").append(searchService.region())
            .append("\n\tTags: ").append(searchService.tags())
            .append("\n\tSku: ").append(searchService.sku().name())
            .append("\n\tStatus: ").append(searchService.status())
            .append("\n\tProvisioning State: ").append(searchService.provisioningState())
            .append("\n\tHosting Mode: ").append(searchService.hostingMode())
            .append("\n\tReplicas: ").append(searchService.replicaCount())
            .append("\n\tPartitions: ").append(searchService.partitionCount())
            .append("\n\tPrimary Admin Key: ").append(adminKeys.primaryKey())
            .append("\n\tSecondary Admin Key: ").append(adminKeys.secondaryKey())
            .append("\n\tQuery keys:");
        for (QueryKey queryKey : queryKeys) {
            info.append("\n\t\tKey name: ").append(queryKey.name());
            info.append("\n\t\t  Value: ").append(queryKey.key());
        }
        System.out.println(info.toString());
    }

    /**
     * Retrieve the secondary service principal client ID.
     *
     * @param envSecondaryServicePrincipal an Azure Container Registry
     * @return a service principal client ID
     * @throws IOException exception
     */
class Utils {
    private static final ClientLogger LOGGER = new ClientLogger(Utils.class);

    // Lazily generated and cached by sshPublicKey(); key generation is expensive.
    private static String sshPublicKey;

    // Utility class; no instances.
    private Utils() {
    }

    /**
     * Generates a random sample password and echoes it to stdout.
     *
     * @return a generated password
     */
    public static String password() {
        String password = new ResourceManagerUtils.InternalRuntimeContext().randomResourceName("Pa5$", 12);
        System.out.printf("Password: %s%n", password);
        return password;
    }

    /**
     * Generates (once) and returns an RSA public key in OpenSSH "ssh-rsa" format.
     * The key is cached in a static field, so every call returns the same key.
     *
     * @return an SSH public key
     */
    public static String sshPublicKey() {
        if (sshPublicKey == null) {
            try {
                KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA");
                // 2048-bit minimum: 1024-bit RSA is no longer considered secure and is
                // rejected by current OpenSSH releases (was 1024 before).
                keyGen.initialize(2048);
                KeyPair pair = keyGen.generateKeyPair();
                PublicKey publicKey = pair.getPublic();
                RSAPublicKey rsaPublicKey = (RSAPublicKey) publicKey;
                ByteArrayOutputStream byteOs = new ByteArrayOutputStream();
                DataOutputStream dos = new DataOutputStream(byteOs);
                // RFC 4253 public-key blob: length-prefixed "ssh-rsa", exponent, modulus.
                dos.writeInt("ssh-rsa".getBytes(StandardCharsets.US_ASCII).length);
                dos.write("ssh-rsa".getBytes(StandardCharsets.US_ASCII));
                dos.writeInt(rsaPublicKey.getPublicExponent().toByteArray().length);
                dos.write(rsaPublicKey.getPublicExponent().toByteArray());
                dos.writeInt(rsaPublicKey.getModulus().toByteArray().length);
                dos.write(rsaPublicKey.getModulus().toByteArray());
                String publicKeyEncoded = new String(Base64.getEncoder().encode(byteOs.toByteArray()),
                    StandardCharsets.US_ASCII);
                sshPublicKey = "ssh-rsa " + publicKeyEncoded;
            } catch (NoSuchAlgorithmException | IOException e) {
                throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e));
            }
        }
        return sshPublicKey;
    }

    /**
     * Creates a randomized resource name.
     * Please provide your own implementation, or avoid using the method, if code is to be used in production.
     *
     * @param azure the AzureResourceManager instance.
     * @param prefix the prefix to the name.
     * @param maxLen the max length of the name.
     * @return the randomized resource name.
     */
    public static String randomResourceName(AzureResourceManager azure, String prefix, int maxLen) {
        return azure.resourceGroups().manager().internalContext().randomResourceName(prefix, maxLen);
    }

    /**
     * Generates the specified number of random resource names with the same prefix.
     * Please provide your own implementation, or avoid using the method, if code is to be used in production.
     *
     * @param azure the AzureResourceManager instance.
     * @param prefix the prefix to be used if possible
     * @param maxLen the maximum length for the random generated name
     * @param count the number of names to generate
     * @return the randomized resource names.
     */
    public static String[] randomResourceNames(AzureResourceManager azure, String prefix, int maxLen, int count) {
        String[] names = new String[count];
        for (int i = 0; i < count; i++) {
            names[i] = randomResourceName(azure, prefix, maxLen);
        }
        return names;
    }

    /**
     * Creates a random UUID.
     * Please provide your own implementation, or avoid using the method, if code is to be used in production.
     *
     * @param azure the AzureResourceManager instance.
     * @return the random UUID.
     */
    public static String randomUuid(AzureResourceManager azure) {
        return azure.resourceGroups().manager().internalContext().randomUuid();
    }

    /**
     * Creates a randomized resource name.
     * Please provide your own implementation, or avoid using the method, if code is to be used in production.
     *
     * @param authenticated the AzureResourceManager.Authenticated instance.
     * @param prefix the prefix to the name.
     * @param maxLen the max length of the name.
     * @return the randomized resource name.
     */
    public static String randomResourceName(AzureResourceManager.Authenticated authenticated, String prefix,
        int maxLen) {
        return authenticated.roleAssignments().manager().internalContext().randomResourceName(prefix, maxLen);
    }

    /**
     * Print resource group info.
* * @param resource a resource group */ public static void print(ResourceGroup resource) { StringBuilder info = new StringBuilder(); info.append("Resource Group: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()); System.out.println(info.toString()); } /** * Print User Assigned MSI info. * * @param resource a User Assigned MSI */ public static void print(Identity resource) { StringBuilder info = new StringBuilder(); info.append("Resource Group: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tService Principal Id: ").append(resource.principalId()) .append("\n\tClient Id: ").append(resource.clientId()) .append("\n\tTenant Id: ").append(resource.tenantId()); System.out.println(info.toString()); } /** * Print virtual machine info. * * @param resource a virtual machine */ public static void print(VirtualMachine resource) { StringBuilder storageProfile = new StringBuilder().append("\n\tStorageProfile: "); if (resource.storageProfile().imageReference() != null) { storageProfile.append("\n\t\tImageReference:"); storageProfile.append("\n\t\t\tPublisher: ").append(resource.storageProfile().imageReference().publisher()); storageProfile.append("\n\t\t\tOffer: ").append(resource.storageProfile().imageReference().offer()); storageProfile.append("\n\t\t\tSKU: ").append(resource.storageProfile().imageReference().sku()); storageProfile.append("\n\t\t\tVersion: ").append(resource.storageProfile().imageReference().version()); } if (resource.storageProfile().osDisk() != null) { storageProfile.append("\n\t\tOSDisk:"); storageProfile.append("\n\t\t\tOSType: ").append(resource.storageProfile().osDisk().osType()); storageProfile.append("\n\t\t\tName: ").append(resource.storageProfile().osDisk().name()); storageProfile.append("\n\t\t\tCaching: 
").append(resource.storageProfile().osDisk().caching()); storageProfile.append("\n\t\t\tCreateOption: ").append(resource.storageProfile().osDisk().createOption()); storageProfile.append("\n\t\t\tDiskSizeGB: ").append(resource.storageProfile().osDisk().diskSizeGB()); if (resource.storageProfile().osDisk().managedDisk() != null) { if (resource.storageProfile().osDisk().managedDisk().diskEncryptionSet() != null) { storageProfile.append("\n\t\t\tDiskEncryptionSet Id: ") .append(resource.storageProfile().osDisk().managedDisk().diskEncryptionSet().id()); } } if (resource.storageProfile().osDisk().image() != null) { storageProfile.append("\n\t\t\tImage Uri: ").append(resource.storageProfile().osDisk().image().uri()); } if (resource.storageProfile().osDisk().vhd() != null) { storageProfile.append("\n\t\t\tVhd Uri: ").append(resource.storageProfile().osDisk().vhd().uri()); } if (resource.storageProfile().osDisk().encryptionSettings() != null) { storageProfile.append("\n\t\t\tEncryptionSettings: "); storageProfile.append("\n\t\t\t\tEnabled: ").append(resource.storageProfile().osDisk().encryptionSettings().enabled()); storageProfile.append("\n\t\t\t\tDiskEncryptionKey Uri: ").append(resource .storageProfile() .osDisk() .encryptionSettings() .diskEncryptionKey().secretUrl()); storageProfile.append("\n\t\t\t\tKeyEncryptionKey Uri: ").append(resource .storageProfile() .osDisk() .encryptionSettings() .keyEncryptionKey().keyUrl()); } } if (resource.storageProfile().dataDisks() != null) { int i = 0; for (DataDisk disk : resource.storageProfile().dataDisks()) { storageProfile.append("\n\t\tDataDisk: storageProfile.append("\n\t\t\tName: ").append(disk.name()); storageProfile.append("\n\t\t\tCaching: ").append(disk.caching()); storageProfile.append("\n\t\t\tCreateOption: ").append(disk.createOption()); storageProfile.append("\n\t\t\tDiskSizeGB: ").append(disk.diskSizeGB()); storageProfile.append("\n\t\t\tLun: ").append(disk.lun()); if (resource.isManagedDiskEnabled()) { if 
(disk.managedDisk() != null) { storageProfile.append("\n\t\t\tManaged Disk Id: ").append(disk.managedDisk().id()); if (disk.managedDisk().diskEncryptionSet() != null) { storageProfile.append("\n\t\t\tDiskEncryptionSet Id: ").append(disk.managedDisk().diskEncryptionSet().id()); } } } else { if (disk.vhd().uri() != null) { storageProfile.append("\n\t\t\tVhd Uri: ").append(disk.vhd().uri()); } } if (disk.image() != null) { storageProfile.append("\n\t\t\tImage Uri: ").append(disk.image().uri()); } } } StringBuilder osProfile = new StringBuilder().append("\n\tOSProfile: "); if (resource.osProfile() != null) { osProfile.append("\n\t\tComputerName:").append(resource.osProfile().computerName()); if (resource.osProfile().windowsConfiguration() != null) { osProfile.append("\n\t\t\tWindowsConfiguration: "); osProfile.append("\n\t\t\t\tProvisionVMAgent: ") .append(resource.osProfile().windowsConfiguration().provisionVMAgent()); osProfile.append("\n\t\t\t\tEnableAutomaticUpdates: ") .append(resource.osProfile().windowsConfiguration().enableAutomaticUpdates()); osProfile.append("\n\t\t\t\tTimeZone: ") .append(resource.osProfile().windowsConfiguration().timeZone()); } if (resource.osProfile().linuxConfiguration() != null) { osProfile.append("\n\t\t\tLinuxConfiguration: "); osProfile.append("\n\t\t\t\tDisablePasswordAuthentication: ") .append(resource.osProfile().linuxConfiguration().disablePasswordAuthentication()); } } else { osProfile.append("null"); } StringBuilder networkProfile = new StringBuilder().append("\n\tNetworkProfile: "); for (String networkInterfaceId : resource.networkInterfaceIds()) { networkProfile.append("\n\t\tId:").append(networkInterfaceId); } StringBuilder extensions = new StringBuilder().append("\n\tExtensions: "); for (Map.Entry<String, VirtualMachineExtension> extensionEntry : resource.listExtensions().entrySet()) { VirtualMachineExtension extension = extensionEntry.getValue(); extensions.append("\n\t\tExtension: ").append(extension.id()) 
.append("\n\t\t\tName: ").append(extension.name()) .append("\n\t\t\tTags: ").append(extension.tags()) .append("\n\t\t\tProvisioningState: ").append(extension.provisioningState()) .append("\n\t\t\tAuto upgrade minor version enabled: ").append(extension.autoUpgradeMinorVersionEnabled()) .append("\n\t\t\tPublisher: ").append(extension.publisherName()) .append("\n\t\t\tType: ").append(extension.typeName()) .append("\n\t\t\tVersion: ").append(extension.versionName()) .append("\n\t\t\tPublic Settings: ").append(extension.publicSettingsAsJsonString()); } StringBuilder msi = new StringBuilder().append("\n\tMSI: "); msi.append("\n\t\t\tMSI enabled:").append(resource.isManagedServiceIdentityEnabled()); msi.append("\n\t\t\tSystem Assigned MSI Active Directory Service Principal Id:").append(resource.systemAssignedManagedServiceIdentityPrincipalId()); msi.append("\n\t\t\tSystem Assigned MSI Active Directory Tenant Id:").append(resource.systemAssignedManagedServiceIdentityTenantId()); StringBuilder zones = new StringBuilder().append("\n\tZones: "); zones.append(resource.availabilityZones()); StringBuilder securityProfile = new StringBuilder().append("\n\tSecurityProfile: "); securityProfile.append("\n\t\t\tSecurity type: ").append(resource.securityType()); securityProfile.append("\n\t\t\tSecure Boot enabled: ").append(resource.isSecureBootEnabled()); securityProfile.append("\n\t\t\tvTPM enabled: ").append(resource.isVTpmEnabled()); System.out.println(new StringBuilder().append("Virtual Machine: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tHardwareProfile: ") .append("\n\t\tSize: ").append(resource.size()) .append(storageProfile) .append(osProfile) .append(networkProfile) .append(extensions) .append(msi) .append(zones) .append(securityProfile) .toString()); } /** * Print availability 
set info.
     *
     * @param resource an availability set
     */
    public static void print(AvailabilitySet resource) {
        System.out.println(new StringBuilder().append("Availability Set: ").append(resource.id())
            .append("Name: ").append(resource.name())
            .append("\n\tResource group: ").append(resource.resourceGroupName())
            .append("\n\tRegion: ").append(resource.region())
            .append("\n\tTags: ").append(resource.tags())
            .append("\n\tFault domain count: ").append(resource.faultDomainCount())
            .append("\n\tUpdate domain count: ").append(resource.updateDomainCount())
            .toString());
    }

    /**
     * Print network info.
     *
     * @param resource a network
     * @throws ManagementException Cloud errors
     */
    public static void print(Network resource) {
        StringBuilder info = new StringBuilder();
        info.append("Network: ").append(resource.id())
            .append("Name: ").append(resource.name())
            .append("\n\tResource group: ").append(resource.resourceGroupName())
            .append("\n\tRegion: ").append(resource.region())
            .append("\n\tTags: ").append(resource.tags())
            .append("\n\tAddress spaces: ").append(resource.addressSpaces())
            .append("\n\tDNS server IPs: ").append(resource.dnsServerIPs());
        for (Subnet subnet : resource.subnets().values()) {
            info.append("\n\tSubnet: ").append(subnet.name())
                .append("\n\t\tAddress prefix: ").append(subnet.addressPrefix());
            NetworkSecurityGroup subnetNsg = subnet.getNetworkSecurityGroup();
            if (subnetNsg != null) {
                info.append("\n\t\tNetwork security group ID: ").append(subnetNsg.id());
            }
            RouteTable routeTable = subnet.getRouteTable();
            if (routeTable != null) {
                info.append("\n\tRoute table ID: ").append(routeTable.id());
            }
            Map<ServiceEndpointType, List<Region>> services = subnet.servicesWithAccess();
            // isEmpty() reads better than size() > 0; behavior is identical.
            if (!services.isEmpty()) {
                info.append("\n\tServices with access");
                for (Map.Entry<ServiceEndpointType, List<Region>> service : services.entrySet()) {
                    // FIX: replaced the mixed append/concat form
                    // (" Regions: " + service.getValue() + "") — the trailing + "" was a
                    // no-op — with plain chained appends; output is unchanged.
                    info.append("\n\t\tService: ")
                        .append(service.getKey())
                        .append(" Regions: ")
                        .append(service.getValue());
                }
            }
        }
        for (NetworkPeering peering :
resource.peerings().list()) { info.append("\n\tPeering: ").append(peering.name()) .append("\n\t\tRemote network ID: ").append(peering.remoteNetworkId()) .append("\n\t\tPeering state: ").append(peering.state()) .append("\n\t\tIs traffic forwarded from remote network allowed? ").append(peering.isTrafficForwardingFromRemoteNetworkAllowed()) .append("\n\t\tGateway use: ").append(peering.gatewayUse()); } System.out.println(info.toString()); } /** * Print network interface. * * @param resource a network interface */ public static void print(NetworkInterface resource) { StringBuilder info = new StringBuilder(); info.append("NetworkInterface: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tInternal DNS name label: ").append(resource.internalDnsNameLabel()) .append("\n\tInternal FQDN: ").append(resource.internalFqdn()) .append("\n\tInternal domain name suffix: ").append(resource.internalDomainNameSuffix()) .append("\n\tNetwork security group: ").append(resource.networkSecurityGroupId()) .append("\n\tApplied DNS servers: ").append(resource.appliedDnsServers().toString()) .append("\n\tDNS server IPs: "); for (String dnsServerIp : resource.dnsServers()) { info.append("\n\t\t").append(dnsServerIp); } info.append("\n\tIP forwarding enabled? ").append(resource.isIPForwardingEnabled()) .append("\n\tAccelerated networking enabled? 
").append(resource.isAcceleratedNetworkingEnabled()) .append("\n\tMAC Address:").append(resource.macAddress()) .append("\n\tPrivate IP:").append(resource.primaryPrivateIP()) .append("\n\tPrivate allocation method:").append(resource.primaryPrivateIpAllocationMethod()) .append("\n\tPrimary virtual network ID: ").append(resource.primaryIPConfiguration().networkId()) .append("\n\tPrimary subnet name:").append(resource.primaryIPConfiguration().subnetName()); System.out.println(info.toString()); } /** * Print network security group. * * @param resource a network security group */ public static void print(NetworkSecurityGroup resource) { StringBuilder info = new StringBuilder(); info.append("NSG: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()); for (NetworkSecurityRule rule : resource.securityRules().values()) { info.append("\n\tRule: ").append(rule.name()) .append("\n\t\tAccess: ").append(rule.access()) .append("\n\t\tDirection: ").append(rule.direction()) .append("\n\t\tFrom address: ").append(rule.sourceAddressPrefix()) .append("\n\t\tFrom port range: ").append(rule.sourcePortRange()) .append("\n\t\tTo address: ").append(rule.destinationAddressPrefix()) .append("\n\t\tTo port: ").append(rule.destinationPortRange()) .append("\n\t\tProtocol: ").append(rule.protocol()) .append("\n\t\tPriority: ").append(rule.priority()); } System.out.println(info.toString()); } /** * Print public IP address. 
* * @param resource a public IP address */ public static void print(PublicIpAddress resource) { System.out.println(new StringBuilder().append("Public IP Address: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tIP Address: ").append(resource.ipAddress()) .append("\n\tLeaf domain label: ").append(resource.leafDomainLabel()) .append("\n\tFQDN: ").append(resource.fqdn()) .append("\n\tReverse FQDN: ").append(resource.reverseFqdn()) .append("\n\tIdle timeout (minutes): ").append(resource.idleTimeoutInMinutes()) .append("\n\tIP allocation method: ").append(resource.ipAllocationMethod()) .append("\n\tZones: ").append(resource.availabilityZones()) .toString()); } /** * Print a key vault. * * @param vault the key vault resource */ public static void print(Vault vault) { StringBuilder info = new StringBuilder().append("Key Vault: ").append(vault.id()) .append("Name: ").append(vault.name()) .append("\n\tResource group: ").append(vault.resourceGroupName()) .append("\n\tRegion: ").append(vault.region()) .append("\n\tSku: ").append(vault.sku().name()).append(" - ").append(vault.sku().family()) .append("\n\tVault URI: ").append(vault.vaultUri()) .append("\n\tAccess policies: "); for (AccessPolicy accessPolicy : vault.accessPolicies()) { info.append("\n\t\tIdentity:").append(accessPolicy.objectId()); if (accessPolicy.permissions() != null) { if (accessPolicy.permissions().keys() != null) { info.append("\n\t\tKey permissions: ").append(accessPolicy.permissions().keys().stream().map(KeyPermissions::toString).collect(Collectors.joining(", "))); } if (accessPolicy.permissions().secrets() != null) { info.append("\n\t\tSecret permissions: ").append(accessPolicy.permissions().secrets().stream().map(SecretPermissions::toString).collect(Collectors.joining(", "))); } if 
(accessPolicy.permissions().certificates() != null) { info.append("\n\t\tCertificate permissions: ").append(accessPolicy.permissions().certificates().stream().map(CertificatePermissions::toString).collect(Collectors.joining(", "))); } } } System.out.println(info.toString()); } /** * Print storage account. * * @param storageAccount a storage account */ public static void print(StorageAccount storageAccount) { System.out.println(storageAccount.name() + " created @ " + storageAccount.creationTime()); StringBuilder info = new StringBuilder().append("Storage Account: ").append(storageAccount.id()) .append("Name: ").append(storageAccount.name()) .append("\n\tResource group: ").append(storageAccount.resourceGroupName()) .append("\n\tRegion: ").append(storageAccount.region()) .append("\n\tSKU: ").append(storageAccount.skuType().name().toString()) .append("\n\tAccessTier: ").append(storageAccount.accessTier()) .append("\n\tKind: ").append(storageAccount.kind()); info.append("\n\tNetwork Rule Configuration: ") .append("\n\t\tAllow reading logs from any network: ").append(storageAccount.canReadLogEntriesFromAnyNetwork()) .append("\n\t\tAllow reading metrics from any network: ").append(storageAccount.canReadMetricsFromAnyNetwork()) .append("\n\t\tAllow access from all azure services: ").append(storageAccount.canAccessFromAzureServices()); if (storageAccount.networkSubnetsWithAccess().size() > 0) { info.append("\n\t\tNetwork subnets with access: "); for (String subnetId : storageAccount.networkSubnetsWithAccess()) { info.append("\n\t\t\t").append(subnetId); } } if (storageAccount.ipAddressesWithAccess().size() > 0) { info.append("\n\t\tIP addresses with access: "); for (String ipAddress : storageAccount.ipAddressesWithAccess()) { info.append("\n\t\t\t").append(ipAddress); } } if (storageAccount.ipAddressRangesWithAccess().size() > 0) { info.append("\n\t\tIP address-ranges with access: "); for (String ipAddressRange : storageAccount.ipAddressRangesWithAccess()) { 
info.append("\n\t\t\t").append(ipAddressRange); } } info.append("\n\t\tTraffic allowed from only HTTPS: ").append(storageAccount.innerModel().enableHttpsTrafficOnly()); info.append("\n\tEncryption status: "); info.append("\n\t\tInfrastructure Encryption: ").append(storageAccount.infrastructureEncryptionEnabled() ? "Enabled" : "Disabled"); for (Map.Entry<StorageService, StorageAccountEncryptionStatus> eStatus : storageAccount.encryptionStatuses().entrySet()) { info.append("\n\t\t").append(eStatus.getValue().storageService()).append(": ").append(eStatus.getValue().isEnabled() ? "Enabled" : "Disabled"); } System.out.println(info.toString()); } /** * Print storage account keys. * * @param storageAccountKeys a list of storage account keys */ public static void print(List<StorageAccountKey> storageAccountKeys) { for (int i = 0; i < storageAccountKeys.size(); i++) { StorageAccountKey storageAccountKey = storageAccountKeys.get(i); System.out.println("Key (" + i + ") " + storageAccountKey.keyName() + "=" + storageAccountKey.value()); } } /** * Print Redis Cache. * * @param redisCache a Redis cache. 
*/ public static void print(RedisCache redisCache) { StringBuilder redisInfo = new StringBuilder() .append("Redis Cache Name: ").append(redisCache.name()) .append("\n\tResource group: ").append(redisCache.resourceGroupName()) .append("\n\tRegion: ").append(redisCache.region()) .append("\n\tSKU Name: ").append(redisCache.sku().name()) .append("\n\tSKU Family: ").append(redisCache.sku().family()) .append("\n\tHostname: ").append(redisCache.hostname()) .append("\n\tSSL port: ").append(redisCache.sslPort()) .append("\n\tNon-SSL port (6379) enabled: ").append(redisCache.nonSslPort()); if (redisCache.redisConfiguration() != null && !redisCache.redisConfiguration().isEmpty()) { redisInfo.append("\n\tRedis Configuration:"); for (Map.Entry<String, String> redisConfiguration : redisCache.redisConfiguration().entrySet()) { redisInfo.append("\n\t '").append(redisConfiguration.getKey()) .append("' : '").append(redisConfiguration.getValue()).append("'"); } } if (redisCache.isPremium()) { RedisCachePremium premium = redisCache.asPremium(); List<ScheduleEntry> scheduleEntries = premium.listPatchSchedules(); if (scheduleEntries != null && !scheduleEntries.isEmpty()) { redisInfo.append("\n\tRedis Patch Schedule:"); for (ScheduleEntry schedule : scheduleEntries) { redisInfo.append("\n\t\tDay: '").append(schedule.dayOfWeek()) .append("', start at: '").append(schedule.startHourUtc()) .append("', maintenance window: '").append(schedule.maintenanceWindow()) .append("'"); } } } System.out.println(redisInfo.toString()); } /** * Print Redis Cache access keys. * * @param redisAccessKeys a keys for Redis Cache */ public static void print(RedisAccessKeys redisAccessKeys) { StringBuilder redisKeys = new StringBuilder() .append("Redis Access Keys: ") .append("\n\tPrimary Key: '").append(redisAccessKeys.primaryKey()).append("', ") .append("\n\tSecondary Key: '").append(redisAccessKeys.secondaryKey()).append("', "); System.out.println(redisKeys.toString()); } /** * Print management lock. 
* * @param lock a management lock */ public static void print(ManagementLock lock) { StringBuilder info = new StringBuilder(); info.append("\nLock ID: ").append(lock.id()) .append("\nLocked resource ID: ").append(lock.lockedResourceId()) .append("\nLevel: ").append(lock.level()); System.out.println(info.toString()); } /** * Print load balancer. * * @param resource a load balancer */ public static void print(LoadBalancer resource) { StringBuilder info = new StringBuilder(); info.append("Load balancer: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tBackends: ").append(resource.backends().keySet().toString()); info.append("\n\tPublic IP address IDs: ") .append(resource.publicIpAddressIds().size()); for (String pipId : resource.publicIpAddressIds()) { info.append("\n\t\tPIP id: ").append(pipId); } info.append("\n\tTCP probes: ") .append(resource.tcpProbes().size()); for (LoadBalancerTcpProbe probe : resource.tcpProbes().values()) { info.append("\n\t\tProbe name: ").append(probe.name()) .append("\n\t\t\tPort: ").append(probe.port()) .append("\n\t\t\tInterval in seconds: ").append(probe.intervalInSeconds()) .append("\n\t\t\tRetries before unhealthy: ").append(probe.numberOfProbes()); info.append("\n\t\t\tReferenced from load balancing rules: ") .append(probe.loadBalancingRules().size()); for (LoadBalancingRule rule : probe.loadBalancingRules().values()) { info.append("\n\t\t\t\tName: ").append(rule.name()); } } info.append("\n\tHTTP probes: ") .append(resource.httpProbes().size()); for (LoadBalancerHttpProbe probe : resource.httpProbes().values()) { info.append("\n\t\tProbe name: ").append(probe.name()) .append("\n\t\t\tPort: ").append(probe.port()) .append("\n\t\t\tInterval in seconds: ").append(probe.intervalInSeconds()) .append("\n\t\t\tRetries before unhealthy: 
").append(probe.numberOfProbes()) .append("\n\t\t\tHTTP request path: ").append(probe.requestPath()); info.append("\n\t\t\tReferenced from load balancing rules: ") .append(probe.loadBalancingRules().size()); for (LoadBalancingRule rule : probe.loadBalancingRules().values()) { info.append("\n\t\t\t\tName: ").append(rule.name()); } } info.append("\n\tHTTPS probes: ") .append(resource.httpsProbes().size()); for (LoadBalancerHttpProbe probe : resource.httpsProbes().values()) { info.append("\n\t\tProbe name: ").append(probe.name()) .append("\n\t\t\tPort: ").append(probe.port()) .append("\n\t\t\tInterval in seconds: ").append(probe.intervalInSeconds()) .append("\n\t\t\tRetries before unhealthy: ").append(probe.numberOfProbes()) .append("\n\t\t\tHTTPS request path: ").append(probe.requestPath()); info.append("\n\t\t\tReferenced from load balancing rules: ") .append(probe.loadBalancingRules().size()); for (LoadBalancingRule rule : probe.loadBalancingRules().values()) { info.append("\n\t\t\t\tName: ").append(rule.name()); } } info.append("\n\tLoad balancing rules: ") .append(resource.loadBalancingRules().size()); for (LoadBalancingRule rule : resource.loadBalancingRules().values()) { info.append("\n\t\tLB rule name: ").append(rule.name()) .append("\n\t\t\tProtocol: ").append(rule.protocol()) .append("\n\t\t\tFloating IP enabled? 
").append(rule.floatingIPEnabled()) .append("\n\t\t\tIdle timeout in minutes: ").append(rule.idleTimeoutInMinutes()) .append("\n\t\t\tLoad distribution method: ").append(rule.loadDistribution().toString()); LoadBalancerFrontend frontend = rule.frontend(); info.append("\n\t\t\tFrontend: "); if (frontend != null) { info.append(frontend.name()); } else { info.append("(None)"); } info.append("\n\t\t\tFrontend port: ").append(rule.frontendPort()); LoadBalancerBackend backend = rule.backend(); info.append("\n\t\t\tBackend: "); if (backend != null) { info.append(backend.name()); } else { info.append("(None)"); } info.append("\n\t\t\tBackend port: ").append(rule.backendPort()); LoadBalancerProbe probe = rule.probe(); info.append("\n\t\t\tProbe: "); if (probe == null) { info.append("(None)"); } else { info.append(probe.name()).append(" [").append(probe.protocol().toString()).append("]"); } } info.append("\n\tFrontends: ") .append(resource.frontends().size()); for (LoadBalancerFrontend frontend : resource.frontends().values()) { info.append("\n\t\tFrontend name: ").append(frontend.name()) .append("\n\t\t\tInternet facing: ").append(frontend.isPublic()); if (frontend.isPublic()) { info.append("\n\t\t\tPublic IP Address ID: ").append(((LoadBalancerPublicFrontend) frontend).publicIpAddressId()); } else { info.append("\n\t\t\tVirtual network ID: ").append(((LoadBalancerPrivateFrontend) frontend).networkId()) .append("\n\t\t\tSubnet name: ").append(((LoadBalancerPrivateFrontend) frontend).subnetName()) .append("\n\t\t\tPrivate IP address: ").append(((LoadBalancerPrivateFrontend) frontend).privateIpAddress()) .append("\n\t\t\tPrivate IP allocation method: ").append(((LoadBalancerPrivateFrontend) frontend).privateIpAllocationMethod()); } info.append("\n\t\t\tReferenced inbound NAT pools: ") .append(frontend.inboundNatPools().size()); for (LoadBalancerInboundNatPool pool : frontend.inboundNatPools().values()) { info.append("\n\t\t\t\tName: ").append(pool.name()); } 
info.append("\n\t\t\tReferenced inbound NAT rules: ") .append(frontend.inboundNatRules().size()); for (LoadBalancerInboundNatRule rule : frontend.inboundNatRules().values()) { info.append("\n\t\t\t\tName: ").append(rule.name()); } info.append("\n\t\t\tReferenced load balancing rules: ") .append(frontend.loadBalancingRules().size()); for (LoadBalancingRule rule : frontend.loadBalancingRules().values()) { info.append("\n\t\t\t\tName: ").append(rule.name()); } } info.append("\n\tInbound NAT rules: ") .append(resource.inboundNatRules().size()); for (LoadBalancerInboundNatRule natRule : resource.inboundNatRules().values()) { info.append("\n\t\tInbound NAT rule name: ").append(natRule.name()) .append("\n\t\t\tProtocol: ").append(natRule.protocol().toString()) .append("\n\t\t\tFrontend: ").append(natRule.frontend().name()) .append("\n\t\t\tFrontend port: ").append(natRule.frontendPort()) .append("\n\t\t\tBackend port: ").append(natRule.backendPort()) .append("\n\t\t\tBackend NIC ID: ").append(natRule.backendNetworkInterfaceId()) .append("\n\t\t\tBackend NIC IP config name: ").append(natRule.backendNicIpConfigurationName()) .append("\n\t\t\tFloating IP? 
").append(natRule.floatingIPEnabled()) .append("\n\t\t\tIdle timeout in minutes: ").append(natRule.idleTimeoutInMinutes()); } info.append("\n\tInbound NAT pools: ") .append(resource.inboundNatPools().size()); for (LoadBalancerInboundNatPool natPool : resource.inboundNatPools().values()) { info.append("\n\t\tInbound NAT pool name: ").append(natPool.name()) .append("\n\t\t\tProtocol: ").append(natPool.protocol().toString()) .append("\n\t\t\tFrontend: ").append(natPool.frontend().name()) .append("\n\t\t\tFrontend port range: ") .append(natPool.frontendPortRangeStart()) .append("-") .append(natPool.frontendPortRangeEnd()) .append("\n\t\t\tBackend port: ").append(natPool.backendPort()); } info.append("\n\tBackends: ") .append(resource.backends().size()); for (LoadBalancerBackend backend : resource.backends().values()) { info.append("\n\t\tBackend name: ").append(backend.name()); info.append("\n\t\t\tReferenced NICs: ") .append(backend.backendNicIPConfigurationNames().entrySet().size()); for (Map.Entry<String, String> entry : backend.backendNicIPConfigurationNames().entrySet()) { info.append("\n\t\t\t\tNIC ID: ").append(entry.getKey()) .append(" - IP Config: ").append(entry.getValue()); } Set<String> vmIds = backend.getVirtualMachineIds(); info.append("\n\t\t\tReferenced virtual machine ids: ") .append(vmIds.size()); for (String vmId : vmIds) { info.append("\n\t\t\t\tVM ID: ").append(vmId); } info.append("\n\t\t\tReferenced load balancing rules: ") .append(new ArrayList<String>(backend.loadBalancingRules().keySet())); } System.out.println(info.toString()); } /** * Print app service domain. 
* * @param resource an app service domain */ public static void print(AppServiceDomain resource) { StringBuilder builder = new StringBuilder().append("Domain: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tCreated time: ").append(resource.createdTime()) .append("\n\tExpiration time: ").append(resource.expirationTime()) .append("\n\tContact: "); Contact contact = resource.registrantContact(); if (contact == null) { builder = builder.append("Private"); } else { builder = builder.append("\n\t\tName: ").append(contact.nameFirst() + " " + contact.nameLast()); } builder = builder.append("\n\tName servers: "); for (String nameServer : resource.nameServers()) { builder = builder.append("\n\t\t" + nameServer); } System.out.println(builder.toString()); } /** * Print app service certificate order. * * @param resource an app service certificate order */ public static void print(AppServiceCertificateOrder resource) { StringBuilder builder = new StringBuilder().append("App service certificate order: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tDistinguished name: ").append(resource.distinguishedName()) .append("\n\tProduct type: ").append(resource.productType()) .append("\n\tValid years: ").append(resource.validityInYears()) .append("\n\tStatus: ").append(resource.status()) .append("\n\tIssuance time: ").append(resource.lastCertificateIssuanceTime()) .append("\n\tSigned certificate: ").append(resource.signedCertificate() == null ? null : resource.signedCertificate().thumbprint()); System.out.println(builder.toString()); } /** * Print app service plan. 
* * @param resource an app service plan */ public static void print(AppServicePlan resource) { StringBuilder builder = new StringBuilder().append("App service certificate order: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tPricing tier: ").append(resource.pricingTier()); System.out.println(builder.toString()); } /** * Print a web app. * * @param resource a web app */ public static void print(WebAppBase resource) { StringBuilder builder = new StringBuilder().append("Web app: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tState: ").append(resource.state()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tDefault hostname: ").append(resource.defaultHostname()) .append("\n\tApp service plan: ").append(resource.appServicePlanId()) .append("\n\tHost name bindings: "); for (HostnameBinding binding : resource.getHostnameBindings().values()) { builder = builder.append("\n\t\t" + binding.toString()); } builder = builder.append("\n\tSSL bindings: "); for (HostnameSslState binding : resource.hostnameSslStates().values()) { builder = builder.append("\n\t\t" + binding.name() + ": " + binding.sslState()); if (binding.sslState() != null && binding.sslState() != SslState.DISABLED) { builder = builder.append(" - " + binding.thumbprint()); } } builder = builder.append("\n\tApp settings: "); for (AppSetting setting : resource.getAppSettings().values()) { builder = builder.append("\n\t\t" + setting.key() + ": " + setting.value() + (setting.sticky() ? " - slot setting" : "")); } builder = builder.append("\n\tConnection strings: "); for (ConnectionString conn : resource.getConnectionStrings().values()) { builder = builder.append("\n\t\t" + conn.name() + ": " + conn.value() + " - " + conn.type() + (conn.sticky() ? 
" - slot setting" : "")); } System.out.println(builder.toString()); } /** * Print a web site. * * @param resource a web site */ public static void print(WebSiteBase resource) { StringBuilder builder = new StringBuilder().append("Web app: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tState: ").append(resource.state()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tDefault hostname: ").append(resource.defaultHostname()) .append("\n\tApp service plan: ").append(resource.appServicePlanId()); builder = builder.append("\n\tSSL bindings: "); for (HostnameSslState binding : resource.hostnameSslStates().values()) { builder = builder.append("\n\t\t" + binding.name() + ": " + binding.sslState()); if (binding.sslState() != null && binding.sslState() != SslState.DISABLED) { builder = builder.append(" - " + binding.thumbprint()); } } System.out.println(builder.toString()); } /** * Print a traffic manager profile. 
* * @param profile a traffic manager profile */ public static void print(TrafficManagerProfile profile) { StringBuilder info = new StringBuilder(); info.append("Traffic Manager Profile: ").append(profile.id()) .append("\n\tName: ").append(profile.name()) .append("\n\tResource group: ").append(profile.resourceGroupName()) .append("\n\tRegion: ").append(profile.regionName()) .append("\n\tTags: ").append(profile.tags()) .append("\n\tDNSLabel: ").append(profile.dnsLabel()) .append("\n\tFQDN: ").append(profile.fqdn()) .append("\n\tTTL: ").append(profile.timeToLive()) .append("\n\tEnabled: ").append(profile.isEnabled()) .append("\n\tRoutingMethod: ").append(profile.trafficRoutingMethod()) .append("\n\tMonitor status: ").append(profile.monitorStatus()) .append("\n\tMonitoring port: ").append(profile.monitoringPort()) .append("\n\tMonitoring path: ").append(profile.monitoringPath()); Map<String, TrafficManagerAzureEndpoint> azureEndpoints = profile.azureEndpoints(); if (!azureEndpoints.isEmpty()) { info.append("\n\tAzure endpoints:"); int idx = 1; for (TrafficManagerAzureEndpoint endpoint : azureEndpoints.values()) { info.append("\n\t\tAzure endpoint: .append("\n\t\t\tId: ").append(endpoint.id()) .append("\n\t\t\tType: ").append(endpoint.endpointType()) .append("\n\t\t\tTarget resourceId: ").append(endpoint.targetAzureResourceId()) .append("\n\t\t\tTarget resourceType: ").append(endpoint.targetResourceType()) .append("\n\t\t\tMonitor status: ").append(endpoint.monitorStatus()) .append("\n\t\t\tEnabled: ").append(endpoint.isEnabled()) .append("\n\t\t\tRouting priority: ").append(endpoint.routingPriority()) .append("\n\t\t\tRouting weight: ").append(endpoint.routingWeight()); } } Map<String, TrafficManagerExternalEndpoint> externalEndpoints = profile.externalEndpoints(); if (!externalEndpoints.isEmpty()) { info.append("\n\tExternal endpoints:"); int idx = 1; for (TrafficManagerExternalEndpoint endpoint : externalEndpoints.values()) { info.append("\n\t\tExternal endpoint: 
.append("\n\t\t\tId: ").append(endpoint.id()) .append("\n\t\t\tType: ").append(endpoint.endpointType()) .append("\n\t\t\tFQDN: ").append(endpoint.fqdn()) .append("\n\t\t\tSource Traffic Location: ").append(endpoint.sourceTrafficLocation()) .append("\n\t\t\tMonitor status: ").append(endpoint.monitorStatus()) .append("\n\t\t\tEnabled: ").append(endpoint.isEnabled()) .append("\n\t\t\tRouting priority: ").append(endpoint.routingPriority()) .append("\n\t\t\tRouting weight: ").append(endpoint.routingWeight()); } } Map<String, TrafficManagerNestedProfileEndpoint> nestedProfileEndpoints = profile.nestedProfileEndpoints(); if (!nestedProfileEndpoints.isEmpty()) { info.append("\n\tNested profile endpoints:"); int idx = 1; for (TrafficManagerNestedProfileEndpoint endpoint : nestedProfileEndpoints.values()) { info.append("\n\t\tNested profile endpoint: .append("\n\t\t\tId: ").append(endpoint.id()) .append("\n\t\t\tType: ").append(endpoint.endpointType()) .append("\n\t\t\tNested profileId: ").append(endpoint.nestedProfileId()) .append("\n\t\t\tMinimum child threshold: ").append(endpoint.minimumChildEndpointCount()) .append("\n\t\t\tSource Traffic Location: ").append(endpoint.sourceTrafficLocation()) .append("\n\t\t\tMonitor status: ").append(endpoint.monitorStatus()) .append("\n\t\t\tEnabled: ").append(endpoint.isEnabled()) .append("\n\t\t\tRouting priority: ").append(endpoint.routingPriority()) .append("\n\t\t\tRouting weight: ").append(endpoint.routingWeight()); } } System.out.println(info.toString()); } /** * Print a dns zone. 
* * @param dnsZone a dns zone */ public static void print(DnsZone dnsZone) { StringBuilder info = new StringBuilder(); info.append("DNS Zone: ").append(dnsZone.id()) .append("\n\tName (Top level domain): ").append(dnsZone.name()) .append("\n\tResource group: ").append(dnsZone.resourceGroupName()) .append("\n\tRegion: ").append(dnsZone.regionName()) .append("\n\tTags: ").append(dnsZone.tags()) .append("\n\tName servers:"); for (String nameServer : dnsZone.nameServers()) { info.append("\n\t\t").append(nameServer); } SoaRecordSet soaRecordSet = dnsZone.getSoaRecordSet(); SoaRecord soaRecord = soaRecordSet.record(); info.append("\n\tSOA Record:") .append("\n\t\tHost:").append(soaRecord.host()) .append("\n\t\tEmail:").append(soaRecord.email()) .append("\n\t\tExpire time (seconds):").append(soaRecord.expireTime()) .append("\n\t\tRefresh time (seconds):").append(soaRecord.refreshTime()) .append("\n\t\tRetry time (seconds):").append(soaRecord.retryTime()) .append("\n\t\tNegative response cache ttl (seconds):").append(soaRecord.minimumTtl()) .append("\n\t\tTTL (seconds):").append(soaRecordSet.timeToLive()); PagedIterable<ARecordSet> aRecordSets = dnsZone.aRecordSets().list(); info.append("\n\tA Record sets:"); for (ARecordSet aRecordSet : aRecordSets) { info.append("\n\t\tId: ").append(aRecordSet.id()) .append("\n\t\tName: ").append(aRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(aRecordSet.timeToLive()) .append("\n\t\tIP v4 addresses: "); for (String ipAddress : aRecordSet.ipv4Addresses()) { info.append("\n\t\t\t").append(ipAddress); } } PagedIterable<AaaaRecordSet> aaaaRecordSets = dnsZone.aaaaRecordSets().list(); info.append("\n\tAAAA Record sets:"); for (AaaaRecordSet aaaaRecordSet : aaaaRecordSets) { info.append("\n\t\tId: ").append(aaaaRecordSet.id()) .append("\n\t\tName: ").append(aaaaRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(aaaaRecordSet.timeToLive()) .append("\n\t\tIP v6 addresses: "); for (String ipAddress : 
aaaaRecordSet.ipv6Addresses()) { info.append("\n\t\t\t").append(ipAddress); } } PagedIterable<CnameRecordSet> cnameRecordSets = dnsZone.cNameRecordSets().list(); info.append("\n\tCNAME Record sets:"); for (CnameRecordSet cnameRecordSet : cnameRecordSets) { info.append("\n\t\tId: ").append(cnameRecordSet.id()) .append("\n\t\tName: ").append(cnameRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(cnameRecordSet.timeToLive()) .append("\n\t\tCanonical name: ").append(cnameRecordSet.canonicalName()); } PagedIterable<MxRecordSet> mxRecordSets = dnsZone.mxRecordSets().list(); info.append("\n\tMX Record sets:"); for (MxRecordSet mxRecordSet : mxRecordSets) { info.append("\n\t\tId: ").append(mxRecordSet.id()) .append("\n\t\tName: ").append(mxRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(mxRecordSet.timeToLive()) .append("\n\t\tRecords: "); for (MxRecord mxRecord : mxRecordSet.records()) { info.append("\n\t\t\tExchange server, Preference: ") .append(mxRecord.exchange()) .append(" ") .append(mxRecord.preference()); } } PagedIterable<NsRecordSet> nsRecordSets = dnsZone.nsRecordSets().list(); info.append("\n\tNS Record sets:"); for (NsRecordSet nsRecordSet : nsRecordSets) { info.append("\n\t\tId: ").append(nsRecordSet.id()) .append("\n\t\tName: ").append(nsRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(nsRecordSet.timeToLive()) .append("\n\t\tName servers: "); for (String nameServer : nsRecordSet.nameServers()) { info.append("\n\t\t\t").append(nameServer); } } PagedIterable<PtrRecordSet> ptrRecordSets = dnsZone.ptrRecordSets().list(); info.append("\n\tPTR Record sets:"); for (PtrRecordSet ptrRecordSet : ptrRecordSets) { info.append("\n\t\tId: ").append(ptrRecordSet.id()) .append("\n\t\tName: ").append(ptrRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(ptrRecordSet.timeToLive()) .append("\n\t\tTarget domain names: "); for (String domainNames : ptrRecordSet.targetDomainNames()) { info.append("\n\t\t\t").append(domainNames); } } 
PagedIterable<SrvRecordSet> srvRecordSets = dnsZone.srvRecordSets().list(); info.append("\n\tSRV Record sets:"); for (SrvRecordSet srvRecordSet : srvRecordSets) { info.append("\n\t\tId: ").append(srvRecordSet.id()) .append("\n\t\tName: ").append(srvRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(srvRecordSet.timeToLive()) .append("\n\t\tRecords: "); for (SrvRecord srvRecord : srvRecordSet.records()) { info.append("\n\t\t\tTarget, Port, Priority, Weight: ") .append(srvRecord.target()) .append(", ") .append(srvRecord.port()) .append(", ") .append(srvRecord.priority()) .append(", ") .append(srvRecord.weight()); } } PagedIterable<TxtRecordSet> txtRecordSets = dnsZone.txtRecordSets().list(); info.append("\n\tTXT Record sets:"); for (TxtRecordSet txtRecordSet : txtRecordSets) { info.append("\n\t\tId: ").append(txtRecordSet.id()) .append("\n\t\tName: ").append(txtRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(txtRecordSet.timeToLive()) .append("\n\t\tRecords: "); for (TxtRecord txtRecord : txtRecordSet.records()) { if (txtRecord.value().size() > 0) { info.append("\n\t\t\tValue: ").append(txtRecord.value().get(0)); } } } System.out.println(info.toString()); } /** * Print a private dns zone. 
* * @param privateDnsZone a private dns zone */ public static void print(PrivateDnsZone privateDnsZone) { StringBuilder info = new StringBuilder(); info.append("Private DNS Zone: ").append(privateDnsZone.id()) .append("\n\tName (Top level domain): ").append(privateDnsZone.name()) .append("\n\tResource group: ").append(privateDnsZone.resourceGroupName()) .append("\n\tRegion: ").append(privateDnsZone.regionName()) .append("\n\tTags: ").append(privateDnsZone.tags()) .append("\n\tName servers:"); com.azure.resourcemanager.privatedns.models.SoaRecordSet soaRecordSet = privateDnsZone.getSoaRecordSet(); com.azure.resourcemanager.privatedns.models.SoaRecord soaRecord = soaRecordSet.record(); info.append("\n\tSOA Record:") .append("\n\t\tHost:").append(soaRecord.host()) .append("\n\t\tEmail:").append(soaRecord.email()) .append("\n\t\tExpire time (seconds):").append(soaRecord.expireTime()) .append("\n\t\tRefresh time (seconds):").append(soaRecord.refreshTime()) .append("\n\t\tRetry time (seconds):").append(soaRecord.retryTime()) .append("\n\t\tNegative response cache ttl (seconds):").append(soaRecord.minimumTtl()) .append("\n\t\tTTL (seconds):").append(soaRecordSet.timeToLive()); PagedIterable<com.azure.resourcemanager.privatedns.models.ARecordSet> aRecordSets = privateDnsZone .aRecordSets().list(); info.append("\n\tA Record sets:"); for (com.azure.resourcemanager.privatedns.models.ARecordSet aRecordSet : aRecordSets) { info.append("\n\t\tId: ").append(aRecordSet.id()) .append("\n\t\tName: ").append(aRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(aRecordSet.timeToLive()) .append("\n\t\tIP v4 addresses: "); for (String ipAddress : aRecordSet.ipv4Addresses()) { info.append("\n\t\t\t").append(ipAddress); } } PagedIterable<com.azure.resourcemanager.privatedns.models.AaaaRecordSet> aaaaRecordSets = privateDnsZone .aaaaRecordSets().list(); info.append("\n\tAAAA Record sets:"); for (com.azure.resourcemanager.privatedns.models.AaaaRecordSet aaaaRecordSet : aaaaRecordSets) 
{ info.append("\n\t\tId: ").append(aaaaRecordSet.id()) .append("\n\t\tName: ").append(aaaaRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(aaaaRecordSet.timeToLive()) .append("\n\t\tIP v6 addresses: "); for (String ipAddress : aaaaRecordSet.ipv6Addresses()) { info.append("\n\t\t\t").append(ipAddress); } } PagedIterable<com.azure.resourcemanager.privatedns.models.CnameRecordSet> cnameRecordSets = privateDnsZone.cnameRecordSets().list(); info.append("\n\tCNAME Record sets:"); for (com.azure.resourcemanager.privatedns.models.CnameRecordSet cnameRecordSet : cnameRecordSets) { info.append("\n\t\tId: ").append(cnameRecordSet.id()) .append("\n\t\tName: ").append(cnameRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(cnameRecordSet.timeToLive()) .append("\n\t\tCanonical name: ").append(cnameRecordSet.canonicalName()); } PagedIterable<com.azure.resourcemanager.privatedns.models.MxRecordSet> mxRecordSets = privateDnsZone.mxRecordSets().list(); info.append("\n\tMX Record sets:"); for (com.azure.resourcemanager.privatedns.models.MxRecordSet mxRecordSet : mxRecordSets) { info.append("\n\t\tId: ").append(mxRecordSet.id()) .append("\n\t\tName: ").append(mxRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(mxRecordSet.timeToLive()) .append("\n\t\tRecords: "); for (com.azure.resourcemanager.privatedns.models.MxRecord mxRecord : mxRecordSet.records()) { info.append("\n\t\t\tExchange server, Preference: ") .append(mxRecord.exchange()) .append(" ") .append(mxRecord.preference()); } } PagedIterable<com.azure.resourcemanager.privatedns.models.PtrRecordSet> ptrRecordSets = privateDnsZone .ptrRecordSets().list(); info.append("\n\tPTR Record sets:"); for (com.azure.resourcemanager.privatedns.models.PtrRecordSet ptrRecordSet : ptrRecordSets) { info.append("\n\t\tId: ").append(ptrRecordSet.id()) .append("\n\t\tName: ").append(ptrRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(ptrRecordSet.timeToLive()) .append("\n\t\tTarget domain names: "); for (String 
domainNames : ptrRecordSet.targetDomainNames()) { info.append("\n\t\t\t").append(domainNames); } } PagedIterable<com.azure.resourcemanager.privatedns.models.SrvRecordSet> srvRecordSets = privateDnsZone .srvRecordSets().list(); info.append("\n\tSRV Record sets:"); for (com.azure.resourcemanager.privatedns.models.SrvRecordSet srvRecordSet : srvRecordSets) { info.append("\n\t\tId: ").append(srvRecordSet.id()) .append("\n\t\tName: ").append(srvRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(srvRecordSet.timeToLive()) .append("\n\t\tRecords: "); for (com.azure.resourcemanager.privatedns.models.SrvRecord srvRecord : srvRecordSet.records()) { info.append("\n\t\t\tTarget, Port, Priority, Weight: ") .append(srvRecord.target()) .append(", ") .append(srvRecord.port()) .append(", ") .append(srvRecord.priority()) .append(", ") .append(srvRecord.weight()); } } PagedIterable<com.azure.resourcemanager.privatedns.models.TxtRecordSet> txtRecordSets = privateDnsZone .txtRecordSets().list(); info.append("\n\tTXT Record sets:"); for (com.azure.resourcemanager.privatedns.models.TxtRecordSet txtRecordSet : txtRecordSets) { info.append("\n\t\tId: ").append(txtRecordSet.id()) .append("\n\t\tName: ").append(txtRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(txtRecordSet.timeToLive()) .append("\n\t\tRecords: "); for (com.azure.resourcemanager.privatedns.models.TxtRecord txtRecord : txtRecordSet.records()) { if (txtRecord.value().size() > 0) { info.append("\n\t\t\tValue: ").append(txtRecord.value().get(0)); } } } PagedIterable<VirtualNetworkLink> virtualNetworkLinks = privateDnsZone.virtualNetworkLinks().list(); info.append("\n\tVirtual Network Links:"); for (VirtualNetworkLink virtualNetworkLink : virtualNetworkLinks) { info.append("\n\tId: ").append(virtualNetworkLink.id()) .append("\n\tName: ").append(virtualNetworkLink.name()) .append("\n\tReference of Virtual Network: ").append(virtualNetworkLink.referencedVirtualNetworkId()) .append("\n\tRegistration enabled: 
").append(virtualNetworkLink.isAutoRegistrationEnabled()); } System.out.println(info.toString()); } /** * Print an Azure Container Registry. * * @param azureRegistry an Azure Container Registry */ public static void print(Registry azureRegistry) { StringBuilder info = new StringBuilder(); RegistryCredentials acrCredentials = azureRegistry.getCredentials(); info.append("Azure Container Registry: ").append(azureRegistry.id()) .append("\n\tName: ").append(azureRegistry.name()) .append("\n\tServer Url: ").append(azureRegistry.loginServerUrl()) .append("\n\tUser: ").append(acrCredentials.username()) .append("\n\tFirst Password: ").append(acrCredentials.accessKeys().get(AccessKeyType.PRIMARY)) .append("\n\tSecond Password: ").append(acrCredentials.accessKeys().get(AccessKeyType.SECONDARY)); System.out.println(info.toString()); } /** * Print an Azure Container Service (AKS). * * @param kubernetesCluster a managed container service */ public static void print(KubernetesCluster kubernetesCluster) { StringBuilder info = new StringBuilder(); info.append("Azure Container Service: ").append(kubernetesCluster.id()) .append("\n\tName: ").append(kubernetesCluster.name()) .append("\n\tFQDN: ").append(kubernetesCluster.fqdn()) .append("\n\tDNS prefix label: ").append(kubernetesCluster.dnsPrefix()) .append("\n\t\tWith Agent pool name: ").append(new ArrayList<>(kubernetesCluster.agentPools().keySet()).get(0)) .append("\n\t\tAgent pool count: ").append(new ArrayList<>(kubernetesCluster.agentPools().values()).get(0).count()) .append("\n\t\tAgent pool VM size: ").append(new ArrayList<>(kubernetesCluster.agentPools().values()).get(0).vmSize().toString()) .append("\n\tLinux user name: ").append(kubernetesCluster.linuxRootUsername()) .append("\n\tSSH key: ").append(kubernetesCluster.sshKey()) .append("\n\tService principal client ID: ").append(kubernetesCluster.servicePrincipalClientId()); System.out.println(info.toString()); } /** * Print an Azure Search Service. 
* * @param searchService an Azure Search Service */ public static void print(SearchService searchService) { StringBuilder info = new StringBuilder(); AdminKeys adminKeys = searchService.getAdminKeys(); PagedIterable<QueryKey> queryKeys = searchService.listQueryKeys(); info.append("Azure Search: ").append(searchService.id()) .append("\n\tResource group: ").append(searchService.resourceGroupName()) .append("\n\tRegion: ").append(searchService.region()) .append("\n\tTags: ").append(searchService.tags()) .append("\n\tSku: ").append(searchService.sku().name()) .append("\n\tStatus: ").append(searchService.status()) .append("\n\tProvisioning State: ").append(searchService.provisioningState()) .append("\n\tHosting Mode: ").append(searchService.hostingMode()) .append("\n\tReplicas: ").append(searchService.replicaCount()) .append("\n\tPartitions: ").append(searchService.partitionCount()) .append("\n\tPrimary Admin Key: ").append(adminKeys.primaryKey()) .append("\n\tSecondary Admin Key: ").append(adminKeys.secondaryKey()) .append("\n\tQuery keys:"); for (QueryKey queryKey : queryKeys) { info.append("\n\t\tKey name: ").append(queryKey.name()); info.append("\n\t\t Value: ").append(queryKey.key()); } System.out.println(info.toString()); } /** * Retrieve the secondary service principal client ID. * * @param envSecondaryServicePrincipal an Azure Container Registry * @return a service principal client ID * @throws IOException exception */
// FIX: removed stray non-Java fragment "notAction -> dataAction" that sat between
// the preceding javadoc and this method and broke compilation.
public static String getSecondaryServicePrincipalClientID(String envSecondaryServicePrincipal) throws IOException {
    // The auth file is either JSON (e.g. `az ad sp create-for-rbac --sdk-auth` output)
    // or a legacy Java-properties file; detect by the leading '{'.
    String content = new String(Files.readAllBytes(new File(envSecondaryServicePrincipal).toPath()),
        StandardCharsets.UTF_8).trim();
    HashMap<String, String> auth = new HashMap<>();
    if (content.startsWith("{")) {
        auth = new JacksonAdapter().deserialize(content, auth.getClass(), SerializerEncoding.JSON);
        return auth.get("clientId");
    } else {
        Properties authSettings = new Properties();
        try (FileInputStream credentialsFileStream = new FileInputStream(new File(envSecondaryServicePrincipal))) {
            authSettings.load(credentialsFileStream);
        }
        return authSettings.getProperty("client");
    }
}

/**
 * Retrieve the secondary service principal secret.
 *
 * @param envSecondaryServicePrincipal an Azure Container Registry
 * @return a service principal secret
 * @throws IOException exception
 */
public static String getSecondaryServicePrincipalSecret(String envSecondaryServicePrincipal) throws IOException {
    // Same dual-format handling as getSecondaryServicePrincipalClientID.
    String content = new String(Files.readAllBytes(new File(envSecondaryServicePrincipal).toPath()),
        StandardCharsets.UTF_8).trim();
    HashMap<String, String> auth = new HashMap<>();
    if (content.startsWith("{")) {
        auth = new JacksonAdapter().deserialize(content, auth.getClass(), SerializerEncoding.JSON);
        return auth.get("clientSecret");
    } else {
        Properties authSettings = new Properties();
        try (FileInputStream credentialsFileStream = new FileInputStream(new File(envSecondaryServicePrincipal))) {
            authSettings.load(credentialsFileStream);
        }
        return authSettings.getProperty("key");
    }
}

/**
 * This method creates a certificate for given password.
* * @param certPath location of certificate file * @param pfxPath location of pfx file * @param alias User alias * @param password alias password * @param cnName domain name * @param dnsName dns name in subject alternate name * @throws IOException IO Exception */ public static void createCertificate(String certPath, String pfxPath, String alias, String password, String cnName, String dnsName) throws IOException { if (new File(pfxPath).exists()) { return; } String validityInDays = "3650"; String keyAlg = "RSA"; String sigAlg = "SHA1withRSA"; String keySize = "2048"; String storeType = "pkcs12"; String command = "keytool"; String jdkPath = System.getProperty("java.home"); if (jdkPath != null && !jdkPath.isEmpty()) { jdkPath = jdkPath.concat("\\bin"); if (new File(jdkPath).isDirectory()) { command = String.format("%s%s%s", jdkPath, File.separator, command); } } else { return; } String[] commandArgs = {command, "-genkey", "-alias", alias, "-keystore", pfxPath, "-storepass", password, "-validity", validityInDays, "-keyalg", keyAlg, "-sigalg", sigAlg, "-keysize", keySize, "-storetype", storeType, "-dname", "CN=" + cnName, "-ext", "EKU=1.3.6.1.5.5.7.3.1"}; if (dnsName != null) { List<String> args = new ArrayList<>(Arrays.asList(commandArgs)); args.add("-ext"); args.add("san=dns:" + dnsName); commandArgs = args.toArray(new String[0]); } Utils.cmdInvocation(commandArgs, true); File pfxFile = new File(pfxPath); if (pfxFile.exists()) { String[] certCommandArgs = {command, "-export", "-alias", alias, "-storetype", storeType, "-keystore", pfxPath, "-storepass", password, "-rfc", "-file", certPath}; Utils.cmdInvocation(certCommandArgs, true); File cerFile = new File(pfxPath); if (!cerFile.exists()) { throw new IOException( "Error occurred while creating certificate" + String.join(" ", certCommandArgs)); } } else { throw new IOException("Error occurred while creating certificates" + String.join(" ", commandArgs)); } } /** * This method is used for invoking native commands. 
* * @param command :- command to invoke. * @param ignoreErrorStream : Boolean which controls whether to throw exception or not * based on error stream. * @return result :- depending on the method invocation. * @throws IOException exceptions thrown from the execution */ public static String cmdInvocation(String[] command, boolean ignoreErrorStream) throws IOException { String result = ""; String error = ""; Process process = new ProcessBuilder(command).start(); try ( InputStream inputStream = process.getInputStream(); InputStream errorStream = process.getErrorStream(); BufferedReader br = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8)); BufferedReader ebr = new BufferedReader(new InputStreamReader(errorStream, StandardCharsets.UTF_8)); ) { result = br.readLine(); process.waitFor(); error = ebr.readLine(); if (error != null && (!"".equals(error))) { if (!ignoreErrorStream) { throw new IOException(error, null); } } } catch (Exception e) { throw new RuntimeException("Exception occurred while invoking command", e); } return result; } /** * Prints information for passed SQL Server. * * @param sqlServer sqlServer to be printed */ public static void print(SqlServer sqlServer) { StringBuilder builder = new StringBuilder().append("Sql Server: ").append(sqlServer.id()) .append("Name: ").append(sqlServer.name()) .append("\n\tResource group: ").append(sqlServer.resourceGroupName()) .append("\n\tRegion: ").append(sqlServer.region()) .append("\n\tSqlServer version: ").append(sqlServer.version()) .append("\n\tFully qualified name for Sql Server: ").append(sqlServer.fullyQualifiedDomainName()); System.out.println(builder.toString()); } /** * Prints information for the passed SQL Database. 
* * @param database database to be printed */ public static void print(SqlDatabase database) { StringBuilder builder = new StringBuilder().append("Sql Database: ").append(database.id()) .append("Name: ").append(database.name()) .append("\n\tResource group: ").append(database.resourceGroupName()) .append("\n\tRegion: ").append(database.region()) .append("\n\tSqlServer Name: ").append(database.sqlServerName()) .append("\n\tEdition of SQL database: ").append(database.edition()) .append("\n\tCollation of SQL database: ").append(database.collation()) .append("\n\tCreation date of SQL database: ").append(database.creationDate()) .append("\n\tIs data warehouse: ").append(database.isDataWarehouse()) .append("\n\tRequested service objective of SQL database: ").append(database.requestedServiceObjectiveName()) .append("\n\tName of current service objective of SQL database: ").append(database.currentServiceObjectiveName()) .append("\n\tMax size bytes of SQL database: ").append(database.maxSizeBytes()) .append("\n\tDefault secondary location of SQL database: ").append(database.defaultSecondaryLocation()); System.out.println(builder.toString()); } /** * Prints information for the passed firewall rule. * * @param firewallRule firewall rule to be printed. */ public static void print(SqlFirewallRule firewallRule) { StringBuilder builder = new StringBuilder().append("Sql firewall rule: ").append(firewallRule.id()) .append("Name: ").append(firewallRule.name()) .append("\n\tResource group: ").append(firewallRule.resourceGroupName()) .append("\n\tRegion: ").append(firewallRule.region()) .append("\n\tSqlServer Name: ").append(firewallRule.sqlServerName()) .append("\n\tStart IP Address of the firewall rule: ").append(firewallRule.startIpAddress()) .append("\n\tEnd IP Address of the firewall rule: ").append(firewallRule.endIpAddress()); System.out.println(builder.toString()); } /** * Prints information for the passed virtual network rule. 
* * @param virtualNetworkRule virtual network rule to be printed. */ public static void print(SqlVirtualNetworkRule virtualNetworkRule) { StringBuilder builder = new StringBuilder().append("SQL virtual network rule: ").append(virtualNetworkRule.id()) .append("Name: ").append(virtualNetworkRule.name()) .append("\n\tResource group: ").append(virtualNetworkRule.resourceGroupName()) .append("\n\tSqlServer Name: ").append(virtualNetworkRule.sqlServerName()) .append("\n\tSubnet ID: ").append(virtualNetworkRule.subnetId()) .append("\n\tState: ").append(virtualNetworkRule.state()); System.out.println(builder.toString()); } /** * Prints information for the passed SQL subscription usage metric. * * @param subscriptionUsageMetric metric to be printed. */ public static void print(SqlSubscriptionUsageMetric subscriptionUsageMetric) { StringBuilder builder = new StringBuilder().append("SQL Subscription Usage Metric: ").append(subscriptionUsageMetric.id()) .append("Name: ").append(subscriptionUsageMetric.name()) .append("\n\tDisplay Name: ").append(subscriptionUsageMetric.displayName()) .append("\n\tCurrent Value: ").append(subscriptionUsageMetric.currentValue()) .append("\n\tLimit: ").append(subscriptionUsageMetric.limit()) .append("\n\tUnit: ").append(subscriptionUsageMetric.unit()) .append("\n\tType: ").append(subscriptionUsageMetric.type()); System.out.println(builder.toString()); } /** * Prints information for the passed SQL database usage metric. * * @param dbUsageMetric metric to be printed. 
*/ public static void print(SqlDatabaseUsageMetric dbUsageMetric) { StringBuilder builder = new StringBuilder().append("SQL Database Usage Metric") .append("Name: ").append(dbUsageMetric.name()) .append("\n\tDisplay Name: ").append(dbUsageMetric.displayName()) .append("\n\tCurrent Value: ").append(dbUsageMetric.currentValue()) .append("\n\tLimit: ").append(dbUsageMetric.limit()) .append("\n\tUnit: ").append(dbUsageMetric.unit()); System.out.println(builder.toString()); } /** * Prints information for the passed Failover Group. * * @param failoverGroup the SQL Failover Group to be printed. */ public static void print(SqlFailoverGroup failoverGroup) { StringBuilder builder = new StringBuilder().append("SQL Failover Group: ").append(failoverGroup.id()) .append("Name: ").append(failoverGroup.name()) .append("\n\tResource group: ").append(failoverGroup.resourceGroupName()) .append("\n\tSqlServer Name: ").append(failoverGroup.sqlServerName()) .append("\n\tRead-write endpoint policy: ").append(failoverGroup.readWriteEndpointPolicy()) .append("\n\tData loss grace period: ").append(failoverGroup.readWriteEndpointDataLossGracePeriodMinutes()) .append("\n\tRead-only endpoint policy: ").append(failoverGroup.readOnlyEndpointPolicy()) .append("\n\tReplication state: ").append(failoverGroup.replicationState()) .append("\n\tReplication role: ").append(failoverGroup.replicationRole()); builder.append("\n\tPartner Servers: "); for (PartnerInfo item : failoverGroup.partnerServers()) { builder .append("\n\t\tId: ").append(item.id()) .append("\n\t\tLocation: ").append(item.location()) .append("\n\t\tReplication role: ").append(item.replicationRole()); } builder.append("\n\tDatabases: "); for (String databaseId : failoverGroup.databases()) { builder.append("\n\t\tID: ").append(databaseId); } System.out.println(builder.toString()); } /** * Prints information for the passed SQL server key. * * @param serverKey virtual network rule to be printed. 
*/ public static void print(SqlServerKey serverKey) { StringBuilder builder = new StringBuilder().append("SQL server key: ").append(serverKey.id()) .append("Name: ").append(serverKey.name()) .append("\n\tResource group: ").append(serverKey.resourceGroupName()) .append("\n\tSqlServer Name: ").append(serverKey.sqlServerName()) .append("\n\tRegion: ").append(serverKey.region() != null ? serverKey.region().name() : "") .append("\n\tServer Key Type: ").append(serverKey.serverKeyType()) .append("\n\tServer Key URI: ").append(serverKey.uri()) .append("\n\tServer Key Thumbprint: ").append(serverKey.thumbprint()) .append("\n\tServer Key Creation Date: ").append(serverKey.creationDate() != null ? serverKey.creationDate().toString() : ""); System.out.println(builder.toString()); } /** * Prints information of the elastic pool passed in. * * @param elasticPool elastic pool to be printed */ public static void print(SqlElasticPool elasticPool) { StringBuilder builder = new StringBuilder().append("Sql elastic pool: ").append(elasticPool.id()) .append("Name: ").append(elasticPool.name()) .append("\n\tResource group: ").append(elasticPool.resourceGroupName()) .append("\n\tRegion: ").append(elasticPool.region()) .append("\n\tSqlServer Name: ").append(elasticPool.sqlServerName()) .append("\n\tEdition of elastic pool: ").append(elasticPool.edition()) .append("\n\tTotal number of DTUs in the elastic pool: ").append(elasticPool.dtu()) .append("\n\tMaximum DTUs a database can get in elastic pool: ").append(elasticPool.databaseDtuMax()) .append("\n\tMinimum DTUs a database is guaranteed in elastic pool: ").append(elasticPool.databaseDtuMin()) .append("\n\tCreation date for the elastic pool: ").append(elasticPool.creationDate()) .append("\n\tState of the elastic pool: ").append(elasticPool.state()) .append("\n\tStorage capacity in MBs for the elastic pool: ").append(elasticPool.storageCapacity()); System.out.println(builder.toString()); } /** * Prints information of the elastic pool 
activity. * * @param elasticPoolActivity elastic pool activity to be printed */ public static void print(ElasticPoolActivity elasticPoolActivity) { StringBuilder builder = new StringBuilder().append("Sql elastic pool activity: ").append(elasticPoolActivity.id()) .append("Name: ").append(elasticPoolActivity.name()) .append("\n\tResource group: ").append(elasticPoolActivity.resourceGroupName()) .append("\n\tState: ").append(elasticPoolActivity.state()) .append("\n\tElastic pool name: ").append(elasticPoolActivity.elasticPoolName()) .append("\n\tStart time of activity: ").append(elasticPoolActivity.startTime()) .append("\n\tEnd time of activity: ").append(elasticPoolActivity.endTime()) .append("\n\tError code of activity: ").append(elasticPoolActivity.errorCode()) .append("\n\tError message of activity: ").append(elasticPoolActivity.errorMessage()) .append("\n\tError severity of activity: ").append(elasticPoolActivity.errorSeverity()) .append("\n\tOperation: ").append(elasticPoolActivity.operation()) .append("\n\tCompleted percentage of activity: ").append(elasticPoolActivity.percentComplete()); System.out.println(builder.toString()); } /** * Print an application gateway. * * @param resource an application gateway */ public static void print(ApplicationGateway resource) { StringBuilder info = new StringBuilder(); info.append("Application gateway: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tSKU: ").append(resource.sku().toString()) .append("\n\tOperational state: ").append(resource.operationalState()) .append("\n\tInternet-facing? ").append(resource.isPublic()) .append("\n\tInternal? 
").append(resource.isPrivate()) .append("\n\tDefault private IP address: ").append(resource.privateIpAddress()) .append("\n\tPrivate IP address allocation method: ").append(resource.privateIpAllocationMethod()) .append("\n\tDisabled SSL protocols: ").append(resource.disabledSslProtocols().toString()); Map<String, ApplicationGatewayIpConfiguration> ipConfigs = resource.ipConfigurations(); info.append("\n\tIP configurations: ").append(ipConfigs.size()); for (ApplicationGatewayIpConfiguration ipConfig : ipConfigs.values()) { info.append("\n\t\tName: ").append(ipConfig.name()) .append("\n\t\t\tNetwork id: ").append(ipConfig.networkId()) .append("\n\t\t\tSubnet name: ").append(ipConfig.subnetName()); } Map<String, ApplicationGatewayFrontend> frontends = resource.frontends(); info.append("\n\tFrontends: ").append(frontends.size()); for (ApplicationGatewayFrontend frontend : frontends.values()) { info.append("\n\t\tName: ").append(frontend.name()) .append("\n\t\t\tPublic? ").append(frontend.isPublic()); if (frontend.isPublic()) { info.append("\n\t\t\tPublic IP address ID: ").append(frontend.publicIpAddressId()); } if (frontend.isPrivate()) { info.append("\n\t\t\tPrivate IP address: ").append(frontend.privateIpAddress()) .append("\n\t\t\tPrivate IP allocation method: ").append(frontend.privateIpAllocationMethod()) .append("\n\t\t\tSubnet name: ").append(frontend.subnetName()) .append("\n\t\t\tVirtual network ID: ").append(frontend.networkId()); } } Map<String, ApplicationGatewayBackend> backends = resource.backends(); info.append("\n\tBackends: ").append(backends.size()); for (ApplicationGatewayBackend backend : backends.values()) { info.append("\n\t\tName: ").append(backend.name()) .append("\n\t\t\tAssociated NIC IP configuration IDs: ").append(backend.backendNicIPConfigurationNames().keySet()); Collection<ApplicationGatewayBackendAddress> addresses = backend.addresses(); info.append("\n\t\t\tAddresses: ").append(addresses.size()); for (ApplicationGatewayBackendAddress 
address : addresses) { info.append("\n\t\t\t\tFQDN: ").append(address.fqdn()) .append("\n\t\t\t\tIP: ").append(address.ipAddress()); } } Map<String, ApplicationGatewayBackendHttpConfiguration> httpConfigs = resource.backendHttpConfigurations(); info.append("\n\tHTTP Configurations: ").append(httpConfigs.size()); for (ApplicationGatewayBackendHttpConfiguration httpConfig : httpConfigs.values()) { info.append("\n\t\tName: ").append(httpConfig.name()) .append("\n\t\t\tCookie based affinity: ").append(httpConfig.cookieBasedAffinity()) .append("\n\t\t\tPort: ").append(httpConfig.port()) .append("\n\t\t\tRequest timeout in seconds: ").append(httpConfig.requestTimeout()) .append("\n\t\t\tProtocol: ").append(httpConfig.protocol()) .append("\n\t\tHost header: ").append(httpConfig.hostHeader()) .append("\n\t\tHost header comes from backend? ").append(httpConfig.isHostHeaderFromBackend()) .append("\n\t\tConnection draining timeout in seconds: ").append(httpConfig.connectionDrainingTimeoutInSeconds()) .append("\n\t\tAffinity cookie name: ").append(httpConfig.affinityCookieName()) .append("\n\t\tPath: ").append(httpConfig.path()); ApplicationGatewayProbe probe = httpConfig.probe(); if (probe != null) { info.append("\n\t\tProbe: " + probe.name()); } info.append("\n\t\tIs probe enabled? 
").append(httpConfig.isProbeEnabled()); } Map<String, ApplicationGatewaySslCertificate> sslCerts = resource.sslCertificates(); info.append("\n\tSSL certificates: ").append(sslCerts.size()); for (ApplicationGatewaySslCertificate cert : sslCerts.values()) { info.append("\n\t\tName: ").append(cert.name()) .append("\n\t\t\tCert data: ").append(cert.publicData()); } Map<String, ApplicationGatewayRedirectConfiguration> redirects = resource.redirectConfigurations(); info.append("\n\tRedirect configurations: ").append(redirects.size()); for (ApplicationGatewayRedirectConfiguration redirect : redirects.values()) { info.append("\n\t\tName: ").append(redirect.name()) .append("\n\t\tTarget URL: ").append(redirect.type()) .append("\n\t\tTarget URL: ").append(redirect.targetUrl()) .append("\n\t\tTarget listener: ").append(redirect.targetListener() != null ? redirect.targetListener().name() : null) .append("\n\t\tIs path included? ").append(redirect.isPathIncluded()) .append("\n\t\tIs query string included? ").append(redirect.isQueryStringIncluded()) .append("\n\t\tReferencing request routing rules: ").append(redirect.requestRoutingRules().values()); } Map<String, ApplicationGatewayListener> listeners = resource.listeners(); info.append("\n\tHTTP listeners: ").append(listeners.size()); for (ApplicationGatewayListener listener : listeners.values()) { info.append("\n\t\tName: ").append(listener.name()) .append("\n\t\t\tHost name: ").append(listener.hostname()) .append("\n\t\t\tServer name indication required? 
").append(listener.requiresServerNameIndication()) .append("\n\t\t\tAssociated frontend name: ").append(listener.frontend().name()) .append("\n\t\t\tFrontend port name: ").append(listener.frontendPortName()) .append("\n\t\t\tFrontend port number: ").append(listener.frontendPortNumber()) .append("\n\t\t\tProtocol: ").append(listener.protocol().toString()); if (listener.sslCertificate() != null) { info.append("\n\t\t\tAssociated SSL certificate: ").append(listener.sslCertificate().name()); } } Map<String, ApplicationGatewayProbe> probes = resource.probes(); info.append("\n\tProbes: ").append(probes.size()); for (ApplicationGatewayProbe probe : probes.values()) { info.append("\n\t\tName: ").append(probe.name()) .append("\n\t\tProtocol:").append(probe.protocol().toString()) .append("\n\t\tInterval in seconds: ").append(probe.timeBetweenProbesInSeconds()) .append("\n\t\tRetries: ").append(probe.retriesBeforeUnhealthy()) .append("\n\t\tTimeout: ").append(probe.timeoutInSeconds()) .append("\n\t\tHost: ").append(probe.host()) .append("\n\t\tHealthy HTTP response status code ranges: ").append(probe.healthyHttpResponseStatusCodeRanges()) .append("\n\t\tHealthy HTTP response body contents: ").append(probe.healthyHttpResponseBodyContents()); } Map<String, ApplicationGatewayRequestRoutingRule> rules = resource.requestRoutingRules(); info.append("\n\tRequest routing rules: ").append(rules.size()); for (ApplicationGatewayRequestRoutingRule rule : rules.values()) { info.append("\n\t\tName: ").append(rule.name()) .append("\n\t\tType: ").append(rule.ruleType()) .append("\n\t\tPublic IP address ID: ").append(rule.publicIpAddressId()) .append("\n\t\tHost name: ").append(rule.hostname()) .append("\n\t\tServer name indication required? 
").append(rule.requiresServerNameIndication()) .append("\n\t\tFrontend port: ").append(rule.frontendPort()) .append("\n\t\tFrontend protocol: ").append(rule.frontendProtocol().toString()) .append("\n\t\tBackend port: ").append(rule.backendPort()) .append("\n\t\tCookie based affinity enabled? ").append(rule.cookieBasedAffinity()) .append("\n\t\tRedirect configuration: ").append(rule.redirectConfiguration() != null ? rule.redirectConfiguration().name() : "(none)"); Collection<ApplicationGatewayBackendAddress> addresses = rule.backendAddresses(); info.append("\n\t\t\tBackend addresses: ").append(addresses.size()); for (ApplicationGatewayBackendAddress address : addresses) { info.append("\n\t\t\t\t") .append(address.fqdn()) .append(" [").append(address.ipAddress()).append("]"); } info.append("\n\t\t\tSSL certificate name: "); ApplicationGatewaySslCertificate cert = rule.sslCertificate(); if (cert == null) { info.append("(None)"); } else { info.append(cert.name()); } info.append("\n\t\t\tAssociated backend address pool: "); ApplicationGatewayBackend backend = rule.backend(); if (backend == null) { info.append("(None)"); } else { info.append(backend.name()); } info.append("\n\t\t\tAssociated backend HTTP settings configuration: "); ApplicationGatewayBackendHttpConfiguration config = rule.backendHttpConfiguration(); if (config == null) { info.append("(None)"); } else { info.append(config.name()); } info.append("\n\t\t\tAssociated frontend listener: "); ApplicationGatewayListener listener = rule.listener(); if (listener == null) { info.append("(None)"); } else { info.append(config.name()); } } System.out.println(info.toString()); } /** * Prints information of a virtual machine custom image. 
* * @param image the image */ public static void print(VirtualMachineCustomImage image) { StringBuilder builder = new StringBuilder().append("Virtual machine custom image: ").append(image.id()) .append("Name: ").append(image.name()) .append("\n\tResource group: ").append(image.resourceGroupName()) .append("\n\tCreated from virtual machine: ").append(image.sourceVirtualMachineId()); builder.append("\n\tOS disk image: ") .append("\n\t\tOperating system: ").append(image.osDiskImage().osType()) .append("\n\t\tOperating system state: ").append(image.osDiskImage().osState()) .append("\n\t\tCaching: ").append(image.osDiskImage().caching()) .append("\n\t\tSize (GB): ").append(image.osDiskImage().diskSizeGB()); if (image.isCreatedFromVirtualMachine()) { builder.append("\n\t\tSource virtual machine: ").append(image.sourceVirtualMachineId()); } if (image.osDiskImage().managedDisk() != null) { builder.append("\n\t\tSource managed disk: ").append(image.osDiskImage().managedDisk().id()); } if (image.osDiskImage().snapshot() != null) { builder.append("\n\t\tSource snapshot: ").append(image.osDiskImage().snapshot().id()); } if (image.osDiskImage().blobUri() != null) { builder.append("\n\t\tSource un-managed vhd: ").append(image.osDiskImage().blobUri()); } if (image.dataDiskImages() != null) { for (ImageDataDisk diskImage : image.dataDiskImages().values()) { builder.append("\n\tDisk Image (Lun) .append("\n\t\tCaching: ").append(diskImage.caching()) .append("\n\t\tSize (GB): ").append(diskImage.diskSizeGB()); if (image.isCreatedFromVirtualMachine()) { builder.append("\n\t\tSource virtual machine: ").append(image.sourceVirtualMachineId()); } if (diskImage.managedDisk() != null) { builder.append("\n\t\tSource managed disk: ").append(diskImage.managedDisk().id()); } if (diskImage.snapshot() != null) { builder.append("\n\t\tSource snapshot: ").append(diskImage.snapshot().id()); } if (diskImage.blobUri() != null) { builder.append("\n\t\tSource un-managed vhd: 
").append(diskImage.blobUri()); } } } System.out.println(builder.toString()); } /** * Uploads a file to an Azure app service for Web App. * * @param profile the publishing profile for the app service. * @param fileName the name of the file on server * @param file the local file */ public static void uploadFileViaFtp(PublishingProfile profile, String fileName, InputStream file) { String path = "./site/wwwroot/webapps"; uploadFileViaFtp(profile, fileName, file, path); } /** * Uploads a file to an Azure app service for Function App. * * @param profile the publishing profile for the app service. * @param fileName the name of the file on server * @param file the local file */ public static void uploadFileForFunctionViaFtp(PublishingProfile profile, String fileName, InputStream file) { String path = "./site/wwwroot"; uploadFileViaFtp(profile, fileName, file, path); } private static void uploadFileViaFtp(PublishingProfile profile, String fileName, InputStream file, String path) { FTPClient ftpClient = new FTPClient(); String[] ftpUrlSegments = profile.ftpUrl().split("/", 2); String server = ftpUrlSegments[0]; if (fileName.contains("/")) { int lastslash = fileName.lastIndexOf('/'); path = path + "/" + fileName.substring(0, lastslash); fileName = fileName.substring(lastslash + 1); } try { ftpClient.connect(server); ftpClient.enterLocalPassiveMode(); ftpClient.login(profile.ftpUsername(), profile.ftpPassword()); ftpClient.setFileType(FTP.BINARY_FILE_TYPE); for (String segment : path.split("/")) { if (!ftpClient.changeWorkingDirectory(segment)) { ftpClient.makeDirectory(segment); ftpClient.changeWorkingDirectory(segment); } } ftpClient.storeFile(fileName, file); ftpClient.disconnect(); } catch (IOException e) { e.printStackTrace(); } } /** * Print service bus namespace info. 
*
     * @param serviceBusNamespace a service bus namespace
     */
    public static void print(ServiceBusNamespace serviceBusNamespace) {
        // Namespace identity, timestamps, endpoint and SKU details.
        StringBuilder info = new StringBuilder();
        info.append("Service bus Namespace: ").append(serviceBusNamespace.id())
            .append("\n\tName: ").append(serviceBusNamespace.name())
            .append("\n\tRegion: ").append(serviceBusNamespace.regionName())
            .append("\n\tResourceGroupName: ").append(serviceBusNamespace.resourceGroupName())
            .append("\n\tCreatedAt: ").append(serviceBusNamespace.createdAt())
            .append("\n\tUpdatedAt: ").append(serviceBusNamespace.updatedAt())
            .append("\n\tDnsLabel: ").append(serviceBusNamespace.dnsLabel())
            .append("\n\tFQDN: ").append(serviceBusNamespace.fqdn())
            .append("\n\tSku: ")
            .append("\n\t\tCapacity: ").append(serviceBusNamespace.sku().capacity())
            .append("\n\t\tSkuName: ").append(serviceBusNamespace.sku().name())
            .append("\n\t\tTier: ").append(serviceBusNamespace.sku().tier());
        System.out.println(info.toString());
    }

    /**
     * Print service bus queue info.
     *
     * @param queue a service bus queue
     */
    public static void print(Queue queue) {
        // Queue identity, timestamps, counters and feature flags.
        StringBuilder info = new StringBuilder();
        info.append("Service bus Queue: ").append(queue.id())
            .append("\n\tName: ").append(queue.name())
            .append("\n\tResourceGroupName: ").append(queue.resourceGroupName())
            .append("\n\tCreatedAt: ").append(queue.createdAt())
            .append("\n\tUpdatedAt: ").append(queue.updatedAt())
            .append("\n\tAccessedAt: ").append(queue.accessedAt())
            .append("\n\tActiveMessageCount: ").append(queue.activeMessageCount())
            .append("\n\tCurrentSizeInBytes: ").append(queue.currentSizeInBytes())
            .append("\n\tDeadLetterMessageCount: ").append(queue.deadLetterMessageCount())
            .append("\n\tDefaultMessageTtlDuration: ").append(queue.defaultMessageTtlDuration())
            .append("\n\tDuplicateMessageDetectionHistoryDuration: ").append(queue.duplicateMessageDetectionHistoryDuration())
            .append("\n\tIsBatchedOperationsEnabled: ").append(queue.isBatchedOperationsEnabled())
            .append("\n\tIsDeadLetteringEnabledForExpiredMessages: ").append(queue.isDeadLetteringEnabledForExpiredMessages())
            .append("\n\tIsDuplicateDetectionEnabled: ").append(queue.isDuplicateDetectionEnabled())
            .append("\n\tIsExpressEnabled: ").append(queue.isExpressEnabled())
            .append("\n\tIsPartitioningEnabled: ").append(queue.isPartitioningEnabled())
            .append("\n\tIsSessionEnabled: ").append(queue.isSessionEnabled())
            .append("\n\tDeleteOnIdleDurationInMinutes: ").append(queue.deleteOnIdleDurationInMinutes())
            .append("\n\tMaxDeliveryCountBeforeDeadLetteringMessage: ").append(queue.maxDeliveryCountBeforeDeadLetteringMessage())
            .append("\n\tMaxSizeInMB: ").append(queue.maxSizeInMB())
            .append("\n\tMessageCount: ").append(queue.messageCount())
            .append("\n\tScheduledMessageCount: ").append(queue.scheduledMessageCount())
            .append("\n\tStatus: ").append(queue.status())
            .append("\n\tTransferMessageCount: ").append(queue.transferMessageCount())
            .append("\n\tLockDurationInSeconds: ").append(queue.lockDurationInSeconds())
            .append("\n\tTransferDeadLetterMessageCount: ").append(queue.transferDeadLetterMessageCount());
        System.out.println(info.toString());
    }

    /**
     * Print service bus queue authorization keys info.
     *
     * @param queueAuthorizationRule a service bus queue authorization keys
     */
    public static void print(QueueAuthorizationRule queueAuthorizationRule) {
        StringBuilder info = new StringBuilder();
        info.append("Service bus queue authorization rule: ").append(queueAuthorizationRule.id())
            .append("\n\tName: ").append(queueAuthorizationRule.name())
            .append("\n\tResourceGroupName: ").append(queueAuthorizationRule.resourceGroupName())
            .append("\n\tNamespace Name: ").append(queueAuthorizationRule.namespaceName())
            .append("\n\tQueue Name: ").append(queueAuthorizationRule.queueName());
        List<com.azure.resourcemanager.servicebus.models.AccessRights> rights = queueAuthorizationRule.rights();
        info.append("\n\tNumber of access rights in queue: ").append(rights.size());
        for (com.azure.resourcemanager.servicebus.models.AccessRights right : rights) {
            info.append("\n\t\tAccessRight: ")
                .append("\n\t\t\tName :").append(right.name());
        }
        System.out.println(info.toString());
    }

    /**
     * Print service bus namespace authorization keys info.
     *
     * @param keys a service bus namespace authorization keys
     */
    public static void print(AuthorizationKeys keys) {
        StringBuilder info = new StringBuilder();
        info.append("Authorization keys: ")
            .append("\n\tPrimaryKey: ").append(keys.primaryKey())
            .append("\n\tPrimaryConnectionString: ").append(keys.primaryConnectionString())
            .append("\n\tSecondaryKey: ").append(keys.secondaryKey())
            .append("\n\tSecondaryConnectionString: ").append(keys.secondaryConnectionString());
        System.out.println(info.toString());
    }

    /**
     * Print service bus namespace authorization rule info.
* * @param namespaceAuthorizationRule a service bus namespace authorization rule */ public static void print(NamespaceAuthorizationRule namespaceAuthorizationRule) { StringBuilder builder = new StringBuilder() .append("Service bus queue authorization rule: ").append(namespaceAuthorizationRule.id()) .append("\n\tName: ").append(namespaceAuthorizationRule.name()) .append("\n\tResourceGroupName: ").append(namespaceAuthorizationRule.resourceGroupName()) .append("\n\tNamespace Name: ").append(namespaceAuthorizationRule.namespaceName()); List<com.azure.resourcemanager.servicebus.models.AccessRights> rights = namespaceAuthorizationRule.rights(); builder.append("\n\tNumber of access rights in queue: ").append(rights.size()); for (com.azure.resourcemanager.servicebus.models.AccessRights right : rights) { builder.append("\n\t\tAccessRight: ") .append("\n\t\t\tName :").append(right.name()); } System.out.println(builder.toString()); } /** * Print service bus topic info. * * @param topic a service bus topic */ public static void print(Topic topic) { StringBuilder builder = new StringBuilder() .append("Service bus topic: ").append(topic.id()) .append("\n\tName: ").append(topic.name()) .append("\n\tResourceGroupName: ").append(topic.resourceGroupName()) .append("\n\tCreatedAt: ").append(topic.createdAt()) .append("\n\tUpdatedAt: ").append(topic.updatedAt()) .append("\n\tAccessedAt: ").append(topic.accessedAt()) .append("\n\tActiveMessageCount: ").append(topic.activeMessageCount()) .append("\n\tCurrentSizeInBytes: ").append(topic.currentSizeInBytes()) .append("\n\tDeadLetterMessageCount: ").append(topic.deadLetterMessageCount()) .append("\n\tDefaultMessageTtlDuration: ").append(topic.defaultMessageTtlDuration()) .append("\n\tDuplicateMessageDetectionHistoryDuration: ").append(topic.duplicateMessageDetectionHistoryDuration()) .append("\n\tIsBatchedOperationsEnabled: ").append(topic.isBatchedOperationsEnabled()) .append("\n\tIsDuplicateDetectionEnabled: 
").append(topic.isDuplicateDetectionEnabled()) .append("\n\tIsExpressEnabled: ").append(topic.isExpressEnabled()) .append("\n\tIsPartitioningEnabled: ").append(topic.isPartitioningEnabled()) .append("\n\tDeleteOnIdleDurationInMinutes: ").append(topic.deleteOnIdleDurationInMinutes()) .append("\n\tMaxSizeInMB: ").append(topic.maxSizeInMB()) .append("\n\tScheduledMessageCount: ").append(topic.scheduledMessageCount()) .append("\n\tStatus: ").append(topic.status()) .append("\n\tTransferMessageCount: ").append(topic.transferMessageCount()) .append("\n\tSubscriptionCount: ").append(topic.subscriptionCount()) .append("\n\tTransferDeadLetterMessageCount: ").append(topic.transferDeadLetterMessageCount()); System.out.println(builder.toString()); } /** * Print service bus subscription info. * * @param serviceBusSubscription a service bus subscription */ public static void print(ServiceBusSubscription serviceBusSubscription) { StringBuilder builder = new StringBuilder() .append("Service bus subscription: ").append(serviceBusSubscription.id()) .append("\n\tName: ").append(serviceBusSubscription.name()) .append("\n\tResourceGroupName: ").append(serviceBusSubscription.resourceGroupName()) .append("\n\tCreatedAt: ").append(serviceBusSubscription.createdAt()) .append("\n\tUpdatedAt: ").append(serviceBusSubscription.updatedAt()) .append("\n\tAccessedAt: ").append(serviceBusSubscription.accessedAt()) .append("\n\tActiveMessageCount: ").append(serviceBusSubscription.activeMessageCount()) .append("\n\tDeadLetterMessageCount: ").append(serviceBusSubscription.deadLetterMessageCount()) .append("\n\tDefaultMessageTtlDuration: ").append(serviceBusSubscription.defaultMessageTtlDuration()) .append("\n\tIsBatchedOperationsEnabled: ").append(serviceBusSubscription.isBatchedOperationsEnabled()) .append("\n\tDeleteOnIdleDurationInMinutes: ").append(serviceBusSubscription.deleteOnIdleDurationInMinutes()) .append("\n\tScheduledMessageCount: ").append(serviceBusSubscription.scheduledMessageCount()) 
.append("\n\tStatus: ").append(serviceBusSubscription.status()) .append("\n\tTransferMessageCount: ").append(serviceBusSubscription.transferMessageCount()) .append("\n\tIsDeadLetteringEnabledForExpiredMessages: ").append(serviceBusSubscription.isDeadLetteringEnabledForExpiredMessages()) .append("\n\tIsSessionEnabled: ").append(serviceBusSubscription.isSessionEnabled()) .append("\n\tLockDurationInSeconds: ").append(serviceBusSubscription.lockDurationInSeconds()) .append("\n\tMaxDeliveryCountBeforeDeadLetteringMessage: ").append(serviceBusSubscription.maxDeliveryCountBeforeDeadLetteringMessage()) .append("\n\tIsDeadLetteringEnabledForFilterEvaluationFailedMessages: ").append(serviceBusSubscription.isDeadLetteringEnabledForFilterEvaluationFailedMessages()) .append("\n\tTransferMessageCount: ").append(serviceBusSubscription.transferMessageCount()) .append("\n\tTransferDeadLetterMessageCount: ").append(serviceBusSubscription.transferDeadLetterMessageCount()); System.out.println(builder.toString()); } /** * Print topic Authorization Rule info. 
* * @param topicAuthorizationRule a topic Authorization Rule */ public static void print(TopicAuthorizationRule topicAuthorizationRule) { StringBuilder builder = new StringBuilder() .append("Service bus topic authorization rule: ").append(topicAuthorizationRule.id()) .append("\n\tName: ").append(topicAuthorizationRule.name()) .append("\n\tResourceGroupName: ").append(topicAuthorizationRule.resourceGroupName()) .append("\n\tNamespace Name: ").append(topicAuthorizationRule.namespaceName()) .append("\n\tTopic Name: ").append(topicAuthorizationRule.topicName()); List<com.azure.resourcemanager.servicebus.models.AccessRights> rights = topicAuthorizationRule.rights(); builder.append("\n\tNumber of access rights in queue: ").append(rights.size()); for (com.azure.resourcemanager.servicebus.models.AccessRights right : rights) { builder.append("\n\t\tAccessRight: ") .append("\n\t\t\tName :").append(right.name()); } System.out.println(builder.toString()); } /** * Print CosmosDB info. * * @param cosmosDBAccount a CosmosDB */ public static void print(CosmosDBAccount cosmosDBAccount) { StringBuilder builder = new StringBuilder() .append("CosmosDB: ").append(cosmosDBAccount.id()) .append("\n\tName: ").append(cosmosDBAccount.name()) .append("\n\tResourceGroupName: ").append(cosmosDBAccount.resourceGroupName()) .append("\n\tKind: ").append(cosmosDBAccount.kind().toString()) .append("\n\tDefault consistency level: ").append(cosmosDBAccount.consistencyPolicy().defaultConsistencyLevel()) .append("\n\tIP range filter: ").append(cosmosDBAccount.ipRangeFilter()); DatabaseAccountListKeysResult keys = cosmosDBAccount.listKeys(); DatabaseAccountListReadOnlyKeysResult readOnlyKeys = cosmosDBAccount.listReadOnlyKeys(); builder .append("\n\tPrimary Master Key: ").append(keys.primaryMasterKey()) .append("\n\tSecondary Master Key: ").append(keys.secondaryMasterKey()) .append("\n\tPrimary Read-Only Key: ").append(readOnlyKeys.primaryReadonlyMasterKey()) .append("\n\tSecondary Read-Only Key: 
").append(readOnlyKeys.secondaryReadonlyMasterKey()); for (Location writeReplica : cosmosDBAccount.writableReplications()) { builder.append("\n\t\tWrite replication: ") .append("\n\t\t\tName :").append(writeReplica.locationName()); } builder.append("\n\tNumber of read replications: ").append(cosmosDBAccount.readableReplications().size()); for (Location readReplica : cosmosDBAccount.readableReplications()) { builder.append("\n\t\tRead replication: ") .append("\n\t\t\tName :").append(readReplica.locationName()); } } /** * Print Active Directory User info. * * @param user active directory user */ public static void print(ActiveDirectoryUser user) { StringBuilder builder = new StringBuilder() .append("Active Directory User: ").append(user.id()) .append("\n\tName: ").append(user.name()) .append("\n\tMail: ").append(user.mail()) .append("\n\tMail Nickname: ").append(user.mailNickname()) .append("\n\tUser Principal Name: ").append(user.userPrincipalName()); System.out.println(builder.toString()); } /** * Print Active Directory User info. 
* * @param role role definition */ public static void print(RoleDefinition role) { StringBuilder builder = new StringBuilder() .append("Role Definition: ").append(role.id()) .append("\n\tName: ").append(role.name()) .append("\n\tRole Name: ").append(role.roleName()) .append("\n\tType: ").append(role.type()) .append("\n\tDescription: ").append(role.description()) .append("\n\tType: ").append(role.type()); Set<Permission> permissions = role.permissions(); builder.append("\n\tPermissions: ").append(permissions.size()); for (Permission permission : permissions) { builder.append("\n\t\tPermission Actions: " + permission.actions().size()); for (String action : permission.actions()) { builder.append("\n\t\t\tName :").append(action); } builder.append("\n\t\tPermission Not Actions: " + permission.notActions().size()); for (String notAction : permission.notActions()) { builder.append("\n\t\t\tName :").append(notAction); } builder.append("\n\t\tPermission Data Actions: " + permission.dataActions().size()); for (String notAction : permission.dataActions()) { builder.append("\n\t\t\tName :").append(notAction); } } Set<String> assignableScopes = role.assignableScopes(); builder.append("\n\tAssignable scopes: ").append(assignableScopes.size()); for (String scope : assignableScopes) { builder.append("\n\t\tAssignable Scope: ") .append("\n\t\t\tName :").append(scope); } System.out.println(builder.toString()); } /** * Print Role Assignment info. * * @param roleAssignment role assignment */ public static void print(RoleAssignment roleAssignment) { StringBuilder builder = new StringBuilder() .append("Role Assignment: ") .append("\n\tScope: ").append(roleAssignment.scope()) .append("\n\tPrincipal Id: ").append(roleAssignment.principalId()) .append("\n\tRole Definition Id: ").append(roleAssignment.roleDefinitionId()); System.out.println(builder.toString()); } /** * Print Active Directory Group info. 
* * @param group active directory group */ public static void print(ActiveDirectoryGroup group) { StringBuilder builder = new StringBuilder() .append("Active Directory Group: ").append(group.id()) .append("\n\tName: ").append(group.name()) .append("\n\tMail: ").append(group.mail()) .append("\n\tSecurity Enabled: ").append(group.securityEnabled()) .append("\n\tGroup members:"); for (ActiveDirectoryObject object : group.listMembers()) { builder.append("\n\t\tType: ").append(object.getClass().getSimpleName()) .append("\tName: ").append(object.name()); } System.out.println(builder.toString()); } /** * Print Active Directory Application info. * * @param application active directory application */ public static void print(ActiveDirectoryApplication application) { StringBuilder builder = new StringBuilder() .append("Active Directory Application: ").append(application.id()) .append("\n\tName: ").append(application.name()) .append("\n\tSign on URL: ").append(application.signOnUrl()) .append("\n\tReply URLs:"); for (String replyUrl : application.replyUrls()) { builder.append("\n\t\t").append(replyUrl); } System.out.println(builder.toString()); } /** * Print Service Principal info. * * @param servicePrincipal service principal */ public static void print(ServicePrincipal servicePrincipal) { StringBuilder builder = new StringBuilder() .append("Service Principal: ").append(servicePrincipal.id()) .append("\n\tName: ").append(servicePrincipal.name()) .append("\n\tApplication Id: ").append(servicePrincipal.applicationId()); List<String> names = servicePrincipal.servicePrincipalNames(); builder.append("\n\tNames: ").append(names.size()); for (String name : names) { builder.append("\n\t\tName: ").append(name); } System.out.println(builder.toString()); } /** * Print Network Watcher info. 
* * @param nw network watcher */ public static void print(NetworkWatcher nw) { StringBuilder builder = new StringBuilder() .append("Network Watcher: ").append(nw.id()) .append("\n\tName: ").append(nw.name()) .append("\n\tResource group name: ").append(nw.resourceGroupName()) .append("\n\tRegion name: ").append(nw.regionName()); System.out.println(builder.toString()); } /** * Print packet capture info. * * @param resource packet capture */ public static void print(PacketCapture resource) { StringBuilder sb = new StringBuilder().append("Packet Capture: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tTarget id: ").append(resource.targetId()) .append("\n\tTime limit in seconds: ").append(resource.timeLimitInSeconds()) .append("\n\tBytes to capture per packet: ").append(resource.bytesToCapturePerPacket()) .append("\n\tProvisioning state: ").append(resource.provisioningState()) .append("\n\tStorage location:") .append("\n\tStorage account id: ").append(resource.storageLocation().storageId()) .append("\n\tStorage account path: ").append(resource.storageLocation().storagePath()) .append("\n\tFile path: ").append(resource.storageLocation().filePath()) .append("\n\t Packet capture filters: ").append(resource.filters().size()); for (PacketCaptureFilter filter : resource.filters()) { sb.append("\n\t\tProtocol: ").append(filter.protocol()); sb.append("\n\t\tLocal IP address: ").append(filter.localIpAddress()); sb.append("\n\t\tRemote IP address: ").append(filter.remoteIpAddress()); sb.append("\n\t\tLocal port: ").append(filter.localPort()); sb.append("\n\t\tRemote port: ").append(filter.remotePort()); } System.out.println(sb.toString()); } /** * Print verification IP flow info. 
* * @param resource IP flow verification info */ public static void print(VerificationIPFlow resource) { System.out.println(new StringBuilder("IP flow verification: ") .append("\n\tAccess: ").append(resource.access()) .append("\n\tRule name: ").append(resource.ruleName()) .toString()); } /** * Print topology info. * * @param resource topology */ public static void print(Topology resource) { StringBuilder sb = new StringBuilder().append("Topology: ").append(resource.id()) .append("\n\tTopology parameters: ") .append("\n\t\tResource group: ").append(resource.topologyParameters().targetResourceGroupName()) .append("\n\t\tVirtual network: ").append(resource.topologyParameters().targetVirtualNetwork() == null ? "" : resource.topologyParameters().targetVirtualNetwork().id()) .append("\n\t\tSubnet id: ").append(resource.topologyParameters().targetSubnet() == null ? "" : resource.topologyParameters().targetSubnet().id()) .append("\n\tCreated time: ").append(resource.createdTime()) .append("\n\tLast modified time: ").append(resource.lastModifiedTime()); for (TopologyResource tr : resource.resources().values()) { sb.append("\n\tTopology resource: ").append(tr.id()) .append("\n\t\tName: ").append(tr.name()) .append("\n\t\tLocation: ").append(tr.location()) .append("\n\t\tAssociations:"); for (TopologyAssociation association : tr.associations()) { sb.append("\n\t\t\tName:").append(association.name()) .append("\n\t\t\tResource id:").append(association.resourceId()) .append("\n\t\t\tAssociation type:").append(association.associationType()); } } System.out.println(sb.toString()); } /** * Print flow log settings info. 
* * @param resource flow log settings */ public static void print(FlowLogSettings resource) { System.out.println(new StringBuilder().append("Flow log settings: ") .append("Target resource id: ").append(resource.targetResourceId()) .append("\n\tFlow log enabled: ").append(resource.enabled()) .append("\n\tStorage account id: ").append(resource.storageId()) .append("\n\tRetention policy enabled: ").append(resource.isRetentionEnabled()) .append("\n\tRetention policy days: ").append(resource.retentionDays()) .toString()); } /** * Print availability set info. * * @param resource an availability set */ public static void print(SecurityGroupView resource) { StringBuilder sb = new StringBuilder().append("Security group view: ") .append("\n\tVirtual machine id: ").append(resource.vmId()); for (SecurityGroupNetworkInterface sgni : resource.networkInterfaces().values()) { sb.append("\n\tSecurity group network interface:").append(sgni.id()) .append("\n\t\tSecurity group network interface:") .append("\n\t\tEffective security rules:"); for (EffectiveNetworkSecurityRule rule : sgni.securityRuleAssociations().effectiveSecurityRules()) { sb.append("\n\t\t\tName: ").append(rule.name()) .append("\n\t\t\tDirection: ").append(rule.direction()) .append("\n\t\t\tAccess: ").append(rule.access()) .append("\n\t\t\tPriority: ").append(rule.priority()) .append("\n\t\t\tSource address prefix: ").append(rule.sourceAddressPrefix()) .append("\n\t\t\tSource port range: ").append(rule.sourcePortRange()) .append("\n\t\t\tDestination address prefix: ").append(rule.destinationAddressPrefix()) .append("\n\t\t\tDestination port range: ").append(rule.destinationPortRange()) .append("\n\t\t\tProtocol: ").append(rule.protocol()); } sb.append("\n\t\tSubnet:").append(sgni.securityRuleAssociations().subnetAssociation().id()); printSecurityRule(sb, sgni.securityRuleAssociations().subnetAssociation().securityRules()); if (sgni.securityRuleAssociations().networkInterfaceAssociation() != null) { 
sb.append("\n\t\tNetwork interface:").append(sgni.securityRuleAssociations().networkInterfaceAssociation().id()); printSecurityRule(sb, sgni.securityRuleAssociations().networkInterfaceAssociation().securityRules()); } sb.append("\n\t\tDefault security rules:"); printSecurityRule(sb, sgni.securityRuleAssociations().defaultSecurityRules()); } System.out.println(sb.toString()); } private static void printSecurityRule(StringBuilder sb, List<SecurityRuleInner> rules) { for (SecurityRuleInner rule : rules) { sb.append("\n\t\t\tName: ").append(rule.name()) .append("\n\t\t\tDirection: ").append(rule.direction()) .append("\n\t\t\tAccess: ").append(rule.access()) .append("\n\t\t\tPriority: ").append(rule.priority()) .append("\n\t\t\tSource address prefix: ").append(rule.sourceAddressPrefix()) .append("\n\t\t\tSource port range: ").append(rule.sourcePortRange()) .append("\n\t\t\tDestination address prefix: ").append(rule.destinationAddressPrefix()) .append("\n\t\t\tDestination port range: ").append(rule.destinationPortRange()) .append("\n\t\t\tProtocol: ").append(rule.protocol()) .append("\n\t\t\tDescription: ").append(rule.description()) .append("\n\t\t\tProvisioning state: ").append(rule.provisioningState()); } } /** * Print next hop info. * * @param resource an availability set */ public static void print(NextHop resource) { System.out.println(new StringBuilder("Next hop: ") .append("Next hop type: ").append(resource.nextHopType()) .append("\n\tNext hop ip address: ").append(resource.nextHopIpAddress()) .append("\n\tRoute table id: ").append(resource.routeTableId()) .toString()); } /** * Print container group info. 
* * @param resource a container group */ public static void print(ContainerGroup resource) { StringBuilder info = new StringBuilder().append("Container Group: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tOS type: ").append(resource.osType()); if (resource.ipAddress() != null) { info.append("\n\tPublic IP address: ").append(resource.ipAddress()); } if (resource.externalTcpPorts() != null) { info.append("\n\tExternal TCP ports:"); for (int port : resource.externalTcpPorts()) { info.append(" ").append(port); } } if (resource.externalUdpPorts() != null) { info.append("\n\tExternal UDP ports:"); for (int port : resource.externalUdpPorts()) { info.append(" ").append(port); } } if (resource.imageRegistryServers() != null) { info.append("\n\tPrivate Docker image registries:"); for (String server : resource.imageRegistryServers()) { info.append(" ").append(server); } } if (resource.volumes() != null) { info.append("\n\tVolume mapping: "); for (Map.Entry<String, Volume> entry : resource.volumes().entrySet()) { info.append("\n\t\tName: ").append(entry.getKey()).append(" -> ") .append(entry.getValue().azureFile() != null ? 
entry.getValue().azureFile().shareName() : "empty direcory volume"); } } if (resource.containers() != null) { info.append("\n\tContainer instances: "); for (Map.Entry<String, Container> entry : resource.containers().entrySet()) { Container container = entry.getValue(); info.append("\n\t\tName: ").append(entry.getKey()).append(" -> ").append(container.image()); info.append("\n\t\t\tResources: "); info.append(container.resources().requests().cpu()).append("CPUs "); info.append(container.resources().requests().memoryInGB()).append("GB"); info.append("\n\t\t\tPorts:"); for (ContainerPort port : container.ports()) { info.append(" ").append(port.port()); } if (container.volumeMounts() != null) { info.append("\n\t\t\tVolume mounts:"); for (VolumeMount volumeMount : container.volumeMounts()) { info.append(" ").append(volumeMount.name()).append("->").append(volumeMount.mountPath()); } } if (container.command() != null) { info.append("\n\t\t\tStart commands:"); for (String command : container.command()) { info.append("\n\t\t\t\t").append(command); } } if (container.environmentVariables() != null) { info.append("\n\t\t\tENV vars:"); for (EnvironmentVariable envVar : container.environmentVariables()) { info.append("\n\t\t\t\t").append(envVar.name()).append("=").append(envVar.value()); } } } } System.out.println(info.toString()); } /** * Print event hub namespace. 
* * @param resource a virtual machine */ public static void print(EventHubNamespace resource) { StringBuilder info = new StringBuilder(); info.append("Eventhub Namespace: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tAzureInsightMetricId: ").append(resource.azureInsightMetricId()) .append("\n\tIsAutoScale enabled: ").append(resource.isAutoScaleEnabled()) .append("\n\tServiceBus endpoint: ").append(resource.serviceBusEndpoint()) .append("\n\tThroughPut upper limit: ").append(resource.throughputUnitsUpperLimit()) .append("\n\tCurrent ThroughPut: ").append(resource.currentThroughputUnits()) .append("\n\tCreated time: ").append(resource.createdAt()) .append("\n\tUpdated time: ").append(resource.updatedAt()); System.out.println(info.toString()); } /** * Print event hub. * * @param resource event hub */ public static void print(EventHub resource) { StringBuilder info = new StringBuilder(); info.append("Eventhub: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tNamespace resource group: ").append(resource.namespaceResourceGroupName()) .append("\n\tNamespace: ").append(resource.namespaceName()) .append("\n\tIs data capture enabled: ").append(resource.isDataCaptureEnabled()) .append("\n\tPartition ids: ").append(resource.partitionIds()); if (resource.isDataCaptureEnabled()) { info.append("\n\t\t\tData capture window size in MB: ").append(resource.dataCaptureWindowSizeInMB()); info.append("\n\t\t\tData capture window size in seconds: ").append(resource.dataCaptureWindowSizeInSeconds()); if (resource.captureDestination() != null) { info.append("\n\t\t\tData capture storage account: ").append(resource.captureDestination().storageAccountResourceId()); info.append("\n\t\t\tData capture storage container: ").append(resource.captureDestination().blobContainer()); } } System.out.println(info.toString()); } /** * 
Print event hub namespace recovery pairing.
 *
 * @param resource event hub namespace disaster recovery pairing
 */
public static void print(EventHubDisasterRecoveryPairing resource) {
    // Single multi-line summary of the disaster recovery pairing.
    String summary = "DisasterRecoveryPairing: " + resource.id()
        + "\n\tName: " + resource.name()
        + "\n\tPrimary namespace resource group name: " + resource.primaryNamespaceResourceGroupName()
        + "\n\tPrimary namespace name: " + resource.primaryNamespaceName()
        + "\n\tSecondary namespace: " + resource.secondaryNamespaceId()
        + "\n\tNamespace role: " + resource.namespaceRole();
    System.out.println(summary);
}

/**
 * Print event hub namespace recovery pairing auth rules.
 *
 * @param resource event hub namespace disaster recovery pairing auth rule
 */
public static void print(DisasterRecoveryPairingAuthorizationRule resource) {
    StringBuilder output = new StringBuilder("DisasterRecoveryPairing auth rule: ").append(resource.name());
    // Collect the rights as strings so the list renders as [RIGHT_A, RIGHT_B].
    List<String> rightNames = new ArrayList<>();
    for (AccessRights right : resource.rights()) {
        rightNames.add(right.toString());
    }
    output.append("\n\tRights: ").append(rightNames);
    System.out.println(output.toString());
}

/**
 * Print event hub namespace recovery pairing auth rule key.
* * @param resource event hub namespace disaster recovery pairing auth rule key */ public static void print(DisasterRecoveryPairingAuthorizationKey resource) { StringBuilder info = new StringBuilder(); info.append("DisasterRecoveryPairing auth key: ") .append("\n\t Alias primary connection string: ").append(resource.aliasPrimaryConnectionString()) .append("\n\t Alias secondary connection string: ").append(resource.aliasSecondaryConnectionString()) .append("\n\t Primary key: ").append(resource.primaryKey()) .append("\n\t Secondary key: ").append(resource.secondaryKey()) .append("\n\t Primary connection string: ").append(resource.primaryConnectionString()) .append("\n\t Secondary connection string: ").append(resource.secondaryConnectionString()); System.out.println(info.toString()); } /** * Print event hub consumer group. * * @param resource event hub consumer group */ public static void print(EventHubConsumerGroup resource) { StringBuilder info = new StringBuilder(); info.append("Event hub consumer group: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tNamespace resource group: ").append(resource.namespaceResourceGroupName()) .append("\n\tNamespace: ").append(resource.namespaceName()) .append("\n\tEvent hub name: ").append(resource.eventHubName()) .append("\n\tUser metadata: ").append(resource.userMetadata()); System.out.println(info.toString()); } /** * Print Diagnostic Setting. 
* * @param resource Diagnostic Setting instance */ public static void print(DiagnosticSetting resource) { StringBuilder info = new StringBuilder("Diagnostic Setting: ") .append("\n\tId: ").append(resource.id()) .append("\n\tAssociated resource Id: ").append(resource.resourceId()) .append("\n\tName: ").append(resource.name()) .append("\n\tStorage Account Id: ").append(resource.storageAccountId()) .append("\n\tEventHub Namespace Autorization Rule Id: ").append(resource.eventHubAuthorizationRuleId()) .append("\n\tEventHub name: ").append(resource.eventHubName()) .append("\n\tLog Analytics workspace Id: ").append(resource.workspaceId()); if (resource.logs() != null && !resource.logs().isEmpty()) { info.append("\n\tLog Settings: "); for (LogSettings ls : resource.logs()) { info.append("\n\t\tCategory: ").append(ls.category()); info.append("\n\t\tRetention policy: "); if (ls.retentionPolicy() != null) { info.append(ls.retentionPolicy().days() + " days"); } else { info.append("NONE"); } } } if (resource.metrics() != null && !resource.metrics().isEmpty()) { info.append("\n\tMetric Settings: "); for (MetricSettings ls : resource.metrics()) { info.append("\n\t\tCategory: ").append(ls.category()); info.append("\n\t\tTimegrain: ").append(ls.timeGrain()); info.append("\n\t\tRetention policy: "); if (ls.retentionPolicy() != null) { info.append(ls.retentionPolicy().days() + " days"); } else { info.append("NONE"); } } } System.out.println(info.toString()); } /** * Print Action group settings. 
* * @param actionGroup action group instance */ public static void print(ActionGroup actionGroup) { StringBuilder info = new StringBuilder("Action Group: ") .append("\n\tId: ").append(actionGroup.id()) .append("\n\tName: ").append(actionGroup.name()) .append("\n\tShort Name: ").append(actionGroup.shortName()); if (actionGroup.emailReceivers() != null && !actionGroup.emailReceivers().isEmpty()) { info.append("\n\tEmail receivers: "); for (EmailReceiver er : actionGroup.emailReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tEMail: ").append(er.emailAddress()); info.append("\n\t\tStatus: ").append(er.status()); info.append("\n\t\t==="); } } if (actionGroup.smsReceivers() != null && !actionGroup.smsReceivers().isEmpty()) { info.append("\n\tSMS text message receivers: "); for (SmsReceiver er : actionGroup.smsReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tPhone: ").append(er.countryCode() + er.phoneNumber()); info.append("\n\t\tStatus: ").append(er.status()); info.append("\n\t\t==="); } } if (actionGroup.webhookReceivers() != null && !actionGroup.webhookReceivers().isEmpty()) { info.append("\n\tWebhook receivers: "); for (WebhookReceiver er : actionGroup.webhookReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tURI: ").append(er.serviceUri()); info.append("\n\t\t==="); } } if (actionGroup.pushNotificationReceivers() != null && !actionGroup.pushNotificationReceivers().isEmpty()) { info.append("\n\tApp Push Notification receivers: "); for (AzureAppPushReceiver er : actionGroup.pushNotificationReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tEmail: ").append(er.emailAddress()); info.append("\n\t\t==="); } } if (actionGroup.voiceReceivers() != null && !actionGroup.voiceReceivers().isEmpty()) { info.append("\n\tVoice Message receivers: "); for (VoiceReceiver er : actionGroup.voiceReceivers()) { info.append("\n\t\tName: ").append(er.name()); 
info.append("\n\t\tPhone: ").append(er.countryCode() + er.phoneNumber()); info.append("\n\t\t==="); } } if (actionGroup.automationRunbookReceivers() != null && !actionGroup.automationRunbookReceivers().isEmpty()) { info.append("\n\tAutomation Runbook receivers: "); for (AutomationRunbookReceiver er : actionGroup.automationRunbookReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tRunbook Name: ").append(er.runbookName()); info.append("\n\t\tAccount Id: ").append(er.automationAccountId()); info.append("\n\t\tIs Global: ").append(er.isGlobalRunbook()); info.append("\n\t\tService URI: ").append(er.serviceUri()); info.append("\n\t\tWebhook resource Id: ").append(er.webhookResourceId()); info.append("\n\t\t==="); } } if (actionGroup.azureFunctionReceivers() != null && !actionGroup.azureFunctionReceivers().isEmpty()) { info.append("\n\tAzure Functions receivers: "); for (AzureFunctionReceiver er : actionGroup.azureFunctionReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tFunction Name: ").append(er.functionName()); info.append("\n\t\tFunction App Resource Id: ").append(er.functionAppResourceId()); info.append("\n\t\tFunction Trigger URI: ").append(er.httpTriggerUrl()); info.append("\n\t\t==="); } } if (actionGroup.logicAppReceivers() != null && !actionGroup.logicAppReceivers().isEmpty()) { info.append("\n\tLogic App receivers: "); for (LogicAppReceiver er : actionGroup.logicAppReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tResource Id: ").append(er.resourceId()); info.append("\n\t\tCallback URL: ").append(er.callbackUrl()); info.append("\n\t\t==="); } } if (actionGroup.itsmReceivers() != null && !actionGroup.itsmReceivers().isEmpty()) { info.append("\n\tITSM receivers: "); for (ItsmReceiver er : actionGroup.itsmReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tWorkspace Id: ").append(er.workspaceId()); info.append("\n\t\tConnection Id: 
").append(er.connectionId()); info.append("\n\t\tRegion: ").append(er.region()); info.append("\n\t\tTicket Configuration: ").append(er.ticketConfiguration()); info.append("\n\t\t==="); } } System.out.println(info.toString()); } /** * Print activity log alert settings. * * @param activityLogAlert activity log instance */ public static void print(ActivityLogAlert activityLogAlert) { StringBuilder info = new StringBuilder("Activity Log Alert: ") .append("\n\tId: ").append(activityLogAlert.id()) .append("\n\tName: ").append(activityLogAlert.name()) .append("\n\tDescription: ").append(activityLogAlert.description()) .append("\n\tIs Enabled: ").append(activityLogAlert.enabled()); if (activityLogAlert.scopes() != null && !activityLogAlert.scopes().isEmpty()) { info.append("\n\tScopes: "); for (String er : activityLogAlert.scopes()) { info.append("\n\t\tId: ").append(er); } } if (activityLogAlert.actionGroupIds() != null && !activityLogAlert.actionGroupIds().isEmpty()) { info.append("\n\tAction Groups: "); for (String er : activityLogAlert.actionGroupIds()) { info.append("\n\t\tAction Group Id: ").append(er); } } if (activityLogAlert.equalsConditions() != null && !activityLogAlert.equalsConditions().isEmpty()) { info.append("\n\tAlert conditions (when all of is true): "); for (Map.Entry<String, String> er : activityLogAlert.equalsConditions().entrySet()) { info.append("\n\t\t'").append(er.getKey()).append("' equals '").append(er.getValue()).append("'"); } } System.out.println(info.toString()); } /** * Print metric alert settings. 
* * @param metricAlert metric alert instance */ public static void print(MetricAlert metricAlert) { StringBuilder info = new StringBuilder("Metric Alert: ") .append("\n\tId: ").append(metricAlert.id()) .append("\n\tName: ").append(metricAlert.name()) .append("\n\tDescription: ").append(metricAlert.description()) .append("\n\tIs Enabled: ").append(metricAlert.enabled()) .append("\n\tIs Auto Mitigated: ").append(metricAlert.autoMitigate()) .append("\n\tSeverity: ").append(metricAlert.severity()) .append("\n\tWindow Size: ").append(metricAlert.windowSize()) .append("\n\tEvaluation Frequency: ").append(metricAlert.evaluationFrequency()); if (metricAlert.scopes() != null && !metricAlert.scopes().isEmpty()) { info.append("\n\tScopes: "); for (String er : metricAlert.scopes()) { info.append("\n\t\tId: ").append(er); } } if (metricAlert.actionGroupIds() != null && !metricAlert.actionGroupIds().isEmpty()) { info.append("\n\tAction Groups: "); for (String er : metricAlert.actionGroupIds()) { info.append("\n\t\tAction Group Id: ").append(er); } } if (metricAlert.alertCriterias() != null && !metricAlert.alertCriterias().isEmpty()) { info.append("\n\tAlert conditions (when all of is true): "); for (Map.Entry<String, MetricAlertCondition> er : metricAlert.alertCriterias().entrySet()) { MetricAlertCondition alertCondition = er.getValue(); info.append("\n\t\tCondition name: ").append(er.getKey()) .append("\n\t\tSignal name: ").append(alertCondition.metricName()) .append("\n\t\tMetric Namespace: ").append(alertCondition.metricNamespace()) .append("\n\t\tOperator: ").append(alertCondition.condition()) .append("\n\t\tThreshold: ").append(alertCondition.threshold()) .append("\n\t\tTime Aggregation: ").append(alertCondition.timeAggregation()); if (alertCondition.dimensions() != null && !alertCondition.dimensions().isEmpty()) { for (MetricDimension dimon : alertCondition.dimensions()) { info.append("\n\t\tDimension Filter: ").append("Name [").append(dimon.name()).append("] operator 
[Include] values["); for (String vals : dimon.values()) { info.append(vals).append(", "); } info.append("]"); } } } } System.out.println(info.toString()); } /** * Print spring service settings. * * @param springService spring service instance */ public static void print(SpringService springService) { StringBuilder info = new StringBuilder("Spring Service: ") .append("\n\tId: ").append(springService.id()) .append("\n\tName: ").append(springService.name()) .append("\n\tResource Group: ").append(springService.resourceGroupName()) .append("\n\tRegion: ").append(springService.region()) .append("\n\tTags: ").append(springService.tags()); ConfigServerProperties serverProperties = springService.getServerProperties(); if (serverProperties != null && serverProperties.provisioningState() != null && serverProperties.provisioningState().equals(ConfigServerState.SUCCEEDED) && serverProperties.configServer() != null) { info.append("\n\tProperties: "); if (serverProperties.configServer().gitProperty() != null) { info.append("\n\t\tGit: ").append(serverProperties.configServer().gitProperty().uri()); } } if (springService.sku() != null) { info.append("\n\tSku: ") .append("\n\t\tName: ").append(springService.sku().name()) .append("\n\t\tTier: ").append(springService.sku().tier()) .append("\n\t\tCapacity: ").append(springService.sku().capacity()); } MonitoringSettingProperties monitoringSettingProperties = springService.getMonitoringSetting(); if (monitoringSettingProperties != null && monitoringSettingProperties.provisioningState() != null && monitoringSettingProperties.provisioningState().equals(MonitoringSettingState.SUCCEEDED)) { info.append("\n\tTrace: ") .append("\n\t\tEnabled: ").append(monitoringSettingProperties.traceEnabled()) .append("\n\t\tApp Insight Instrumentation Key: ").append(monitoringSettingProperties.appInsightsInstrumentationKey()); } System.out.println(info.toString()); } /** * Print spring app settings. 
* * @param springApp spring app instance */ public static void print(SpringApp springApp) { StringBuilder info = new StringBuilder("Spring Service: ") .append("\n\tId: ").append(springApp.id()) .append("\n\tName: ").append(springApp.name()) .append("\n\tPublic Endpoint: ").append(springApp.isPublic()) .append("\n\tUrl: ").append(springApp.url()) .append("\n\tHttps Only: ").append(springApp.isHttpsOnly()) .append("\n\tFully Qualified Domain Name: ").append(springApp.fqdn()) .append("\n\tActive Deployment Name: ").append(springApp.activeDeploymentName()); if (springApp.temporaryDisk() != null) { info.append("\n\tTemporary Disk:") .append("\n\t\tSize In GB: ").append(springApp.temporaryDisk().sizeInGB()) .append("\n\t\tMount Path: ").append(springApp.temporaryDisk().mountPath()); } if (springApp.persistentDisk() != null) { info.append("\n\tPersistent Disk:") .append("\n\t\tSize In GB: ").append(springApp.persistentDisk().sizeInGB()) .append("\n\t\tMount Path: ").append(springApp.persistentDisk().mountPath()); } if (springApp.identity() != null) { info.append("\n\tIdentity:") .append("\n\t\tType: ").append(springApp.identity().type()) .append("\n\t\tPrincipal Id: ").append(springApp.identity().principalId()) .append("\n\t\tTenant Id: ").append(springApp.identity().tenantId()); } System.out.println(info.toString()); } /** * Print private link resource. * * @param privateLinkResource the private link resource */ public static void print(PrivateLinkResource privateLinkResource) { StringBuilder info = new StringBuilder("Private Link Resource: ") .append("\n\tGroup ID: ").append(privateLinkResource.groupId()) .append("\n\tRequired Member Names: ").append(privateLinkResource.requiredMemberNames()) .append("\n\tRequired DNS Zone Names: ").append(privateLinkResource.requiredDnsZoneNames()); System.out.println(info); } /** * Print private endpoint. 
* * @param privateEndpoint the private endpoint */ public static void print(PrivateEndpoint privateEndpoint) { StringBuilder info = new StringBuilder("Private Endpoint: ") .append("\n\tId: ").append(privateEndpoint.id()) .append("\n\tName: ").append(privateEndpoint.name()); if (privateEndpoint.privateLinkServiceConnections() != null && !privateEndpoint.privateLinkServiceConnections().isEmpty()) { for (PrivateEndpoint.PrivateLinkServiceConnection connection : privateEndpoint.privateLinkServiceConnections().values()) { info .append("\n\t\tPrivate Link Service Connection Name: ").append(connection.name()) .append("\n\t\tPrivate Link Resource ID: ").append(connection.privateLinkResourceId()) .append("\n\t\tSub Resource Names: ").append(connection.subResourceNames()) .append("\n\t\tProvision Status: ").append(connection.state().status()); } } if (privateEndpoint.privateLinkServiceConnections() != null && !privateEndpoint.privateLinkServiceConnections().isEmpty()) { info.append("\n\tPrivate Link Service Connections:"); for (PrivateEndpoint.PrivateLinkServiceConnection connection : privateEndpoint.privateLinkServiceConnections().values()) { info .append("\n\t\tName: ").append(connection.name()) .append("\n\t\tPrivate Link Resource ID: ").append(connection.privateLinkResourceId()) .append("\n\t\tSub Resource Names: ").append(connection.subResourceNames()) .append("\n\t\tStatus: ").append(connection.state().status()); } } if (privateEndpoint.customDnsConfigurations() != null && !privateEndpoint.customDnsConfigurations().isEmpty()) { info.append("\n\tCustom DNS Configure:"); for (CustomDnsConfigPropertiesFormat customDns : privateEndpoint.customDnsConfigurations()) { info .append("\n\t\tFQDN: ").append(customDns.fqdn()) .append("\n\t\tIP Address: ").append(customDns.ipAddresses()); } } System.out.println(info); } /** * Sends a GET request to target URL. * <p> * Retry logic tuned for AppService. * The method does not handle 301 redirect. * * @param urlString the target URL. 
* @return Content of the HTTP response. */ public static String sendGetRequest(String urlString) { HttpRequest request = new HttpRequest(HttpMethod.GET, urlString); Mono<Response<String>> response = stringResponse(HTTP_PIPELINE.send(request) .flatMap(response1 -> { int code = response1.getStatusCode(); if (code == 200 || code == 400 || code == 404) { return Mono.just(response1); } else { return Mono.error(new HttpResponseException(response1)); } }) .retryWhen(Retry .fixedDelay(5, Duration.ofSeconds(30)) .filter(t -> { boolean retry = false; if (t instanceof TimeoutException) { retry = true; } else if (t instanceof HttpResponseException && ((HttpResponseException) t).getResponse().getStatusCode() == 503) { retry = true; } if (retry) { LOGGER.info("retry GET request to {}", urlString); } return retry; }))); Response<String> ret = response.block(); return ret == null ? null : ret.getValue(); } /** * Sends a POST request to target URL. * <p> * Retry logic tuned for AppService. * * @param urlString the target URL. * @param body the request body. * @return Content of the HTTP response. * */ public static String sendPostRequest(String urlString, String body) { try { HttpRequest request = new HttpRequest(HttpMethod.POST, urlString).setBody(body); Mono<Response<String>> response = stringResponse(HTTP_PIPELINE.send(request) .flatMap(response1 -> { int code = response1.getStatusCode(); if (code == 200 || code == 400 || code == 404) { return Mono.just(response1); } else { return Mono.error(new HttpResponseException(response1)); } }) .retryWhen(Retry .fixedDelay(5, Duration.ofSeconds(30)) .filter(t -> { boolean retry = false; if (t instanceof TimeoutException) { retry = true; } if (retry) { LOGGER.info("retry POST request to {}", urlString); } return retry; }))); Response<String> ret = response.block(); return ret == null ? 
null : ret.getValue(); } catch (Exception e) { LOGGER.logThrowableAsError(e); return null; } } private static Mono<Response<String>> stringResponse(Mono<HttpResponse> responseMono) { return responseMono.flatMap(response -> response.getBodyAsString() .map(str -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), str))); } private static final HttpPipeline HTTP_PIPELINE = new HttpPipelineBuilder() .policies( new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC)), new RetryPolicy("Retry-After", ChronoUnit.SECONDS)) .build(); /** * Get the size of the iterable. * * @param iterable iterable to count size * @param <T> generic type parameter of the iterable * @return size of the iterable */ public static <T> int getSize(Iterable<T> iterable) { int res = 0; Iterator<T> iterator = iterable.iterator(); while (iterator.hasNext()) { iterator.next(); res++; } return res; } }
builder.append("\n\t\t\tName :").append(notAction);
public static String getSecondaryServicePrincipalClientID(String envSecondaryServicePrincipal) throws IOException { String content = new String(Files.readAllBytes(new File(envSecondaryServicePrincipal).toPath()), StandardCharsets.UTF_8).trim(); HashMap<String, String> auth = new HashMap<>(); if (content.startsWith("{")) { auth = new JacksonAdapter().deserialize(content, auth.getClass(), SerializerEncoding.JSON); return auth.get("clientId"); } else { Properties authSettings = new Properties(); try (FileInputStream credentialsFileStream = new FileInputStream(new File(envSecondaryServicePrincipal))) { authSettings.load(credentialsFileStream); } return authSettings.getProperty("client"); } } /** * Retrieve the secondary service principal secret. * * @param envSecondaryServicePrincipal an Azure Container Registry * @return a service principal secret * @throws IOException exception */ public static String getSecondaryServicePrincipalSecret(String envSecondaryServicePrincipal) throws IOException { String content = new String(Files.readAllBytes(new File(envSecondaryServicePrincipal).toPath()), StandardCharsets.UTF_8).trim(); HashMap<String, String> auth = new HashMap<>(); if (content.startsWith("{")) { auth = new JacksonAdapter().deserialize(content, auth.getClass(), SerializerEncoding.JSON); return auth.get("clientSecret"); } else { Properties authSettings = new Properties(); try (FileInputStream credentialsFileStream = new FileInputStream(new File(envSecondaryServicePrincipal))) { authSettings.load(credentialsFileStream); } return authSettings.getProperty("key"); } } /** * This method creates a certificate for given password. 
* * @param certPath location of certificate file * @param pfxPath location of pfx file * @param alias User alias * @param password alias password * @param cnName domain name * @param dnsName dns name in subject alternate name * @throws IOException IO Exception */ public static void createCertificate(String certPath, String pfxPath, String alias, String password, String cnName, String dnsName) throws IOException { if (new File(pfxPath).exists()) { return; } String validityInDays = "3650"; String keyAlg = "RSA"; String sigAlg = "SHA1withRSA"; String keySize = "2048"; String storeType = "pkcs12"; String command = "keytool"; String jdkPath = System.getProperty("java.home"); if (jdkPath != null && !jdkPath.isEmpty()) { jdkPath = jdkPath.concat("\\bin"); if (new File(jdkPath).isDirectory()) { command = String.format("%s%s%s", jdkPath, File.separator, command); } } else { return; } String[] commandArgs = {command, "-genkey", "-alias", alias, "-keystore", pfxPath, "-storepass", password, "-validity", validityInDays, "-keyalg", keyAlg, "-sigalg", sigAlg, "-keysize", keySize, "-storetype", storeType, "-dname", "CN=" + cnName, "-ext", "EKU=1.3.6.1.5.5.7.3.1"}; if (dnsName != null) { List<String> args = new ArrayList<>(Arrays.asList(commandArgs)); args.add("-ext"); args.add("san=dns:" + dnsName); commandArgs = args.toArray(new String[0]); } Utils.cmdInvocation(commandArgs, true); File pfxFile = new File(pfxPath); if (pfxFile.exists()) { String[] certCommandArgs = {command, "-export", "-alias", alias, "-storetype", storeType, "-keystore", pfxPath, "-storepass", password, "-rfc", "-file", certPath}; Utils.cmdInvocation(certCommandArgs, true); File cerFile = new File(pfxPath); if (!cerFile.exists()) { throw new IOException( "Error occurred while creating certificate" + String.join(" ", certCommandArgs)); } } else { throw new IOException("Error occurred while creating certificates" + String.join(" ", commandArgs)); } } /** * This method is used for invoking native commands. 
* * @param command :- command to invoke. * @param ignoreErrorStream : Boolean which controls whether to throw exception or not * based on error stream. * @return result :- depending on the method invocation. * @throws IOException exceptions thrown from the execution */ public static String cmdInvocation(String[] command, boolean ignoreErrorStream) throws IOException { String result = ""; String error = ""; Process process = new ProcessBuilder(command).start(); try ( InputStream inputStream = process.getInputStream(); InputStream errorStream = process.getErrorStream(); BufferedReader br = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8)); BufferedReader ebr = new BufferedReader(new InputStreamReader(errorStream, StandardCharsets.UTF_8)); ) { result = br.readLine(); process.waitFor(); error = ebr.readLine(); if (error != null && (!"".equals(error))) { if (!ignoreErrorStream) { throw new IOException(error, null); } } } catch (Exception e) { throw new RuntimeException("Exception occurred while invoking command", e); } return result; } /** * Prints information for passed SQL Server. * * @param sqlServer sqlServer to be printed */ public static void print(SqlServer sqlServer) { StringBuilder builder = new StringBuilder().append("Sql Server: ").append(sqlServer.id()) .append("Name: ").append(sqlServer.name()) .append("\n\tResource group: ").append(sqlServer.resourceGroupName()) .append("\n\tRegion: ").append(sqlServer.region()) .append("\n\tSqlServer version: ").append(sqlServer.version()) .append("\n\tFully qualified name for Sql Server: ").append(sqlServer.fullyQualifiedDomainName()); System.out.println(builder.toString()); } /** * Prints information for the passed SQL Database. 
* * @param database database to be printed */ public static void print(SqlDatabase database) { StringBuilder builder = new StringBuilder().append("Sql Database: ").append(database.id()) .append("Name: ").append(database.name()) .append("\n\tResource group: ").append(database.resourceGroupName()) .append("\n\tRegion: ").append(database.region()) .append("\n\tSqlServer Name: ").append(database.sqlServerName()) .append("\n\tEdition of SQL database: ").append(database.edition()) .append("\n\tCollation of SQL database: ").append(database.collation()) .append("\n\tCreation date of SQL database: ").append(database.creationDate()) .append("\n\tIs data warehouse: ").append(database.isDataWarehouse()) .append("\n\tRequested service objective of SQL database: ").append(database.requestedServiceObjectiveName()) .append("\n\tName of current service objective of SQL database: ").append(database.currentServiceObjectiveName()) .append("\n\tMax size bytes of SQL database: ").append(database.maxSizeBytes()) .append("\n\tDefault secondary location of SQL database: ").append(database.defaultSecondaryLocation()); System.out.println(builder.toString()); } /** * Prints information for the passed firewall rule. * * @param firewallRule firewall rule to be printed. */ public static void print(SqlFirewallRule firewallRule) { StringBuilder builder = new StringBuilder().append("Sql firewall rule: ").append(firewallRule.id()) .append("Name: ").append(firewallRule.name()) .append("\n\tResource group: ").append(firewallRule.resourceGroupName()) .append("\n\tRegion: ").append(firewallRule.region()) .append("\n\tSqlServer Name: ").append(firewallRule.sqlServerName()) .append("\n\tStart IP Address of the firewall rule: ").append(firewallRule.startIpAddress()) .append("\n\tEnd IP Address of the firewall rule: ").append(firewallRule.endIpAddress()); System.out.println(builder.toString()); } /** * Prints information for the passed virtual network rule. 
 *
 * @param virtualNetworkRule virtual network rule to be printed.
 */
public static void print(SqlVirtualNetworkRule virtualNetworkRule) {
    // Samples-only helper: writes a human-readable summary to stdout.
    StringBuilder builder = new StringBuilder().append("SQL virtual network rule: ").append(virtualNetworkRule.id())
        .append("Name: ").append(virtualNetworkRule.name())
        .append("\n\tResource group: ").append(virtualNetworkRule.resourceGroupName())
        .append("\n\tSqlServer Name: ").append(virtualNetworkRule.sqlServerName())
        .append("\n\tSubnet ID: ").append(virtualNetworkRule.subnetId())
        .append("\n\tState: ").append(virtualNetworkRule.state());
    System.out.println(builder.toString());
}

/**
 * Prints information for the passed SQL subscription usage metric.
 *
 * @param subscriptionUsageMetric metric to be printed.
 */
public static void print(SqlSubscriptionUsageMetric subscriptionUsageMetric) {
    StringBuilder builder = new StringBuilder().append("SQL Subscription Usage Metric: ").append(subscriptionUsageMetric.id())
        .append("Name: ").append(subscriptionUsageMetric.name())
        .append("\n\tDisplay Name: ").append(subscriptionUsageMetric.displayName())
        .append("\n\tCurrent Value: ").append(subscriptionUsageMetric.currentValue())
        .append("\n\tLimit: ").append(subscriptionUsageMetric.limit())
        .append("\n\tUnit: ").append(subscriptionUsageMetric.unit())
        .append("\n\tType: ").append(subscriptionUsageMetric.type());
    System.out.println(builder.toString());
}

/**
 * Prints information for the passed SQL database usage metric.
 *
 * @param dbUsageMetric metric to be printed.
 */
public static void print(SqlDatabaseUsageMetric dbUsageMetric) {
    StringBuilder builder = new StringBuilder().append("SQL Database Usage Metric")
        .append("Name: ").append(dbUsageMetric.name())
        .append("\n\tDisplay Name: ").append(dbUsageMetric.displayName())
        .append("\n\tCurrent Value: ").append(dbUsageMetric.currentValue())
        .append("\n\tLimit: ").append(dbUsageMetric.limit())
        .append("\n\tUnit: ").append(dbUsageMetric.unit());
    System.out.println(builder.toString());
}

/**
 * Prints information for the passed Failover Group.
 *
 * @param failoverGroup the SQL Failover Group to be printed.
 */
public static void print(SqlFailoverGroup failoverGroup) {
    StringBuilder builder = new StringBuilder().append("SQL Failover Group: ").append(failoverGroup.id())
        .append("Name: ").append(failoverGroup.name())
        .append("\n\tResource group: ").append(failoverGroup.resourceGroupName())
        .append("\n\tSqlServer Name: ").append(failoverGroup.sqlServerName())
        .append("\n\tRead-write endpoint policy: ").append(failoverGroup.readWriteEndpointPolicy())
        .append("\n\tData loss grace period: ").append(failoverGroup.readWriteEndpointDataLossGracePeriodMinutes())
        .append("\n\tRead-only endpoint policy: ").append(failoverGroup.readOnlyEndpointPolicy())
        .append("\n\tReplication state: ").append(failoverGroup.replicationState())
        .append("\n\tReplication role: ").append(failoverGroup.replicationRole());
    // One sub-entry per partner server in the failover group.
    builder.append("\n\tPartner Servers: ");
    for (PartnerInfo item : failoverGroup.partnerServers()) {
        builder
            .append("\n\t\tId: ").append(item.id())
            .append("\n\t\tLocation: ").append(item.location())
            .append("\n\t\tReplication role: ").append(item.replicationRole());
    }
    // Databases participating in the failover group, by resource ID.
    builder.append("\n\tDatabases: ");
    for (String databaseId : failoverGroup.databases()) {
        builder.append("\n\t\tID: ").append(databaseId);
    }
    System.out.println(builder.toString());
}

/**
 * Prints information for the passed SQL server key.
 *
 * @param serverKey server key to be printed.
 */
public static void print(SqlServerKey serverKey) {
    // region() and creationDate() may be null; guard with empty-string fallbacks.
    StringBuilder builder = new StringBuilder().append("SQL server key: ").append(serverKey.id())
        .append("Name: ").append(serverKey.name())
        .append("\n\tResource group: ").append(serverKey.resourceGroupName())
        .append("\n\tSqlServer Name: ").append(serverKey.sqlServerName())
        .append("\n\tRegion: ").append(serverKey.region() != null ? serverKey.region().name() : "")
        .append("\n\tServer Key Type: ").append(serverKey.serverKeyType())
        .append("\n\tServer Key URI: ").append(serverKey.uri())
        .append("\n\tServer Key Thumbprint: ").append(serverKey.thumbprint())
        .append("\n\tServer Key Creation Date: ").append(serverKey.creationDate() != null
            ? serverKey.creationDate().toString() : "");
    System.out.println(builder.toString());
}

/**
 * Prints information of the elastic pool passed in.
 *
 * @param elasticPool elastic pool to be printed
 */
public static void print(SqlElasticPool elasticPool) {
    StringBuilder builder = new StringBuilder().append("Sql elastic pool: ").append(elasticPool.id())
        .append("Name: ").append(elasticPool.name())
        .append("\n\tResource group: ").append(elasticPool.resourceGroupName())
        .append("\n\tRegion: ").append(elasticPool.region())
        .append("\n\tSqlServer Name: ").append(elasticPool.sqlServerName())
        .append("\n\tEdition of elastic pool: ").append(elasticPool.edition())
        .append("\n\tTotal number of DTUs in the elastic pool: ").append(elasticPool.dtu())
        .append("\n\tMaximum DTUs a database can get in elastic pool: ").append(elasticPool.databaseDtuMax())
        .append("\n\tMinimum DTUs a database is guaranteed in elastic pool: ").append(elasticPool.databaseDtuMin())
        .append("\n\tCreation date for the elastic pool: ").append(elasticPool.creationDate())
        .append("\n\tState of the elastic pool: ").append(elasticPool.state())
        .append("\n\tStorage capacity in MBs for the elastic pool: ").append(elasticPool.storageCapacity());
    System.out.println(builder.toString());
}

/**
 * Prints information of the elastic pool
activity. * * @param elasticPoolActivity elastic pool activity to be printed */ public static void print(ElasticPoolActivity elasticPoolActivity) { StringBuilder builder = new StringBuilder().append("Sql elastic pool activity: ").append(elasticPoolActivity.id()) .append("Name: ").append(elasticPoolActivity.name()) .append("\n\tResource group: ").append(elasticPoolActivity.resourceGroupName()) .append("\n\tState: ").append(elasticPoolActivity.state()) .append("\n\tElastic pool name: ").append(elasticPoolActivity.elasticPoolName()) .append("\n\tStart time of activity: ").append(elasticPoolActivity.startTime()) .append("\n\tEnd time of activity: ").append(elasticPoolActivity.endTime()) .append("\n\tError code of activity: ").append(elasticPoolActivity.errorCode()) .append("\n\tError message of activity: ").append(elasticPoolActivity.errorMessage()) .append("\n\tError severity of activity: ").append(elasticPoolActivity.errorSeverity()) .append("\n\tOperation: ").append(elasticPoolActivity.operation()) .append("\n\tCompleted percentage of activity: ").append(elasticPoolActivity.percentComplete()); System.out.println(builder.toString()); } /** * Print an application gateway. * * @param resource an application gateway */ public static void print(ApplicationGateway resource) { StringBuilder info = new StringBuilder(); info.append("Application gateway: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tSKU: ").append(resource.sku().toString()) .append("\n\tOperational state: ").append(resource.operationalState()) .append("\n\tInternet-facing? ").append(resource.isPublic()) .append("\n\tInternal? 
").append(resource.isPrivate()) .append("\n\tDefault private IP address: ").append(resource.privateIpAddress()) .append("\n\tPrivate IP address allocation method: ").append(resource.privateIpAllocationMethod()) .append("\n\tDisabled SSL protocols: ").append(resource.disabledSslProtocols().toString()); Map<String, ApplicationGatewayIpConfiguration> ipConfigs = resource.ipConfigurations(); info.append("\n\tIP configurations: ").append(ipConfigs.size()); for (ApplicationGatewayIpConfiguration ipConfig : ipConfigs.values()) { info.append("\n\t\tName: ").append(ipConfig.name()) .append("\n\t\t\tNetwork id: ").append(ipConfig.networkId()) .append("\n\t\t\tSubnet name: ").append(ipConfig.subnetName()); } Map<String, ApplicationGatewayFrontend> frontends = resource.frontends(); info.append("\n\tFrontends: ").append(frontends.size()); for (ApplicationGatewayFrontend frontend : frontends.values()) { info.append("\n\t\tName: ").append(frontend.name()) .append("\n\t\t\tPublic? ").append(frontend.isPublic()); if (frontend.isPublic()) { info.append("\n\t\t\tPublic IP address ID: ").append(frontend.publicIpAddressId()); } if (frontend.isPrivate()) { info.append("\n\t\t\tPrivate IP address: ").append(frontend.privateIpAddress()) .append("\n\t\t\tPrivate IP allocation method: ").append(frontend.privateIpAllocationMethod()) .append("\n\t\t\tSubnet name: ").append(frontend.subnetName()) .append("\n\t\t\tVirtual network ID: ").append(frontend.networkId()); } } Map<String, ApplicationGatewayBackend> backends = resource.backends(); info.append("\n\tBackends: ").append(backends.size()); for (ApplicationGatewayBackend backend : backends.values()) { info.append("\n\t\tName: ").append(backend.name()) .append("\n\t\t\tAssociated NIC IP configuration IDs: ").append(backend.backendNicIPConfigurationNames().keySet()); Collection<ApplicationGatewayBackendAddress> addresses = backend.addresses(); info.append("\n\t\t\tAddresses: ").append(addresses.size()); for (ApplicationGatewayBackendAddress 
address : addresses) { info.append("\n\t\t\t\tFQDN: ").append(address.fqdn()) .append("\n\t\t\t\tIP: ").append(address.ipAddress()); } } Map<String, ApplicationGatewayBackendHttpConfiguration> httpConfigs = resource.backendHttpConfigurations(); info.append("\n\tHTTP Configurations: ").append(httpConfigs.size()); for (ApplicationGatewayBackendHttpConfiguration httpConfig : httpConfigs.values()) { info.append("\n\t\tName: ").append(httpConfig.name()) .append("\n\t\t\tCookie based affinity: ").append(httpConfig.cookieBasedAffinity()) .append("\n\t\t\tPort: ").append(httpConfig.port()) .append("\n\t\t\tRequest timeout in seconds: ").append(httpConfig.requestTimeout()) .append("\n\t\t\tProtocol: ").append(httpConfig.protocol()) .append("\n\t\tHost header: ").append(httpConfig.hostHeader()) .append("\n\t\tHost header comes from backend? ").append(httpConfig.isHostHeaderFromBackend()) .append("\n\t\tConnection draining timeout in seconds: ").append(httpConfig.connectionDrainingTimeoutInSeconds()) .append("\n\t\tAffinity cookie name: ").append(httpConfig.affinityCookieName()) .append("\n\t\tPath: ").append(httpConfig.path()); ApplicationGatewayProbe probe = httpConfig.probe(); if (probe != null) { info.append("\n\t\tProbe: " + probe.name()); } info.append("\n\t\tIs probe enabled? 
").append(httpConfig.isProbeEnabled()); } Map<String, ApplicationGatewaySslCertificate> sslCerts = resource.sslCertificates(); info.append("\n\tSSL certificates: ").append(sslCerts.size()); for (ApplicationGatewaySslCertificate cert : sslCerts.values()) { info.append("\n\t\tName: ").append(cert.name()) .append("\n\t\t\tCert data: ").append(cert.publicData()); } Map<String, ApplicationGatewayRedirectConfiguration> redirects = resource.redirectConfigurations(); info.append("\n\tRedirect configurations: ").append(redirects.size()); for (ApplicationGatewayRedirectConfiguration redirect : redirects.values()) { info.append("\n\t\tName: ").append(redirect.name()) .append("\n\t\tTarget URL: ").append(redirect.type()) .append("\n\t\tTarget URL: ").append(redirect.targetUrl()) .append("\n\t\tTarget listener: ").append(redirect.targetListener() != null ? redirect.targetListener().name() : null) .append("\n\t\tIs path included? ").append(redirect.isPathIncluded()) .append("\n\t\tIs query string included? ").append(redirect.isQueryStringIncluded()) .append("\n\t\tReferencing request routing rules: ").append(redirect.requestRoutingRules().values()); } Map<String, ApplicationGatewayListener> listeners = resource.listeners(); info.append("\n\tHTTP listeners: ").append(listeners.size()); for (ApplicationGatewayListener listener : listeners.values()) { info.append("\n\t\tName: ").append(listener.name()) .append("\n\t\t\tHost name: ").append(listener.hostname()) .append("\n\t\t\tServer name indication required? 
").append(listener.requiresServerNameIndication()) .append("\n\t\t\tAssociated frontend name: ").append(listener.frontend().name()) .append("\n\t\t\tFrontend port name: ").append(listener.frontendPortName()) .append("\n\t\t\tFrontend port number: ").append(listener.frontendPortNumber()) .append("\n\t\t\tProtocol: ").append(listener.protocol().toString()); if (listener.sslCertificate() != null) { info.append("\n\t\t\tAssociated SSL certificate: ").append(listener.sslCertificate().name()); } } Map<String, ApplicationGatewayProbe> probes = resource.probes(); info.append("\n\tProbes: ").append(probes.size()); for (ApplicationGatewayProbe probe : probes.values()) { info.append("\n\t\tName: ").append(probe.name()) .append("\n\t\tProtocol:").append(probe.protocol().toString()) .append("\n\t\tInterval in seconds: ").append(probe.timeBetweenProbesInSeconds()) .append("\n\t\tRetries: ").append(probe.retriesBeforeUnhealthy()) .append("\n\t\tTimeout: ").append(probe.timeoutInSeconds()) .append("\n\t\tHost: ").append(probe.host()) .append("\n\t\tHealthy HTTP response status code ranges: ").append(probe.healthyHttpResponseStatusCodeRanges()) .append("\n\t\tHealthy HTTP response body contents: ").append(probe.healthyHttpResponseBodyContents()); } Map<String, ApplicationGatewayRequestRoutingRule> rules = resource.requestRoutingRules(); info.append("\n\tRequest routing rules: ").append(rules.size()); for (ApplicationGatewayRequestRoutingRule rule : rules.values()) { info.append("\n\t\tName: ").append(rule.name()) .append("\n\t\tType: ").append(rule.ruleType()) .append("\n\t\tPublic IP address ID: ").append(rule.publicIpAddressId()) .append("\n\t\tHost name: ").append(rule.hostname()) .append("\n\t\tServer name indication required? 
").append(rule.requiresServerNameIndication()) .append("\n\t\tFrontend port: ").append(rule.frontendPort()) .append("\n\t\tFrontend protocol: ").append(rule.frontendProtocol().toString()) .append("\n\t\tBackend port: ").append(rule.backendPort()) .append("\n\t\tCookie based affinity enabled? ").append(rule.cookieBasedAffinity()) .append("\n\t\tRedirect configuration: ").append(rule.redirectConfiguration() != null ? rule.redirectConfiguration().name() : "(none)"); Collection<ApplicationGatewayBackendAddress> addresses = rule.backendAddresses(); info.append("\n\t\t\tBackend addresses: ").append(addresses.size()); for (ApplicationGatewayBackendAddress address : addresses) { info.append("\n\t\t\t\t") .append(address.fqdn()) .append(" [").append(address.ipAddress()).append("]"); } info.append("\n\t\t\tSSL certificate name: "); ApplicationGatewaySslCertificate cert = rule.sslCertificate(); if (cert == null) { info.append("(None)"); } else { info.append(cert.name()); } info.append("\n\t\t\tAssociated backend address pool: "); ApplicationGatewayBackend backend = rule.backend(); if (backend == null) { info.append("(None)"); } else { info.append(backend.name()); } info.append("\n\t\t\tAssociated backend HTTP settings configuration: "); ApplicationGatewayBackendHttpConfiguration config = rule.backendHttpConfiguration(); if (config == null) { info.append("(None)"); } else { info.append(config.name()); } info.append("\n\t\t\tAssociated frontend listener: "); ApplicationGatewayListener listener = rule.listener(); if (listener == null) { info.append("(None)"); } else { info.append(config.name()); } } System.out.println(info.toString()); } /** * Prints information of a virtual machine custom image. 
* * @param image the image */ public static void print(VirtualMachineCustomImage image) { StringBuilder builder = new StringBuilder().append("Virtual machine custom image: ").append(image.id()) .append("Name: ").append(image.name()) .append("\n\tResource group: ").append(image.resourceGroupName()) .append("\n\tCreated from virtual machine: ").append(image.sourceVirtualMachineId()); builder.append("\n\tOS disk image: ") .append("\n\t\tOperating system: ").append(image.osDiskImage().osType()) .append("\n\t\tOperating system state: ").append(image.osDiskImage().osState()) .append("\n\t\tCaching: ").append(image.osDiskImage().caching()) .append("\n\t\tSize (GB): ").append(image.osDiskImage().diskSizeGB()); if (image.isCreatedFromVirtualMachine()) { builder.append("\n\t\tSource virtual machine: ").append(image.sourceVirtualMachineId()); } if (image.osDiskImage().managedDisk() != null) { builder.append("\n\t\tSource managed disk: ").append(image.osDiskImage().managedDisk().id()); } if (image.osDiskImage().snapshot() != null) { builder.append("\n\t\tSource snapshot: ").append(image.osDiskImage().snapshot().id()); } if (image.osDiskImage().blobUri() != null) { builder.append("\n\t\tSource un-managed vhd: ").append(image.osDiskImage().blobUri()); } if (image.dataDiskImages() != null) { for (ImageDataDisk diskImage : image.dataDiskImages().values()) { builder.append("\n\tDisk Image (Lun) .append("\n\t\tCaching: ").append(diskImage.caching()) .append("\n\t\tSize (GB): ").append(diskImage.diskSizeGB()); if (image.isCreatedFromVirtualMachine()) { builder.append("\n\t\tSource virtual machine: ").append(image.sourceVirtualMachineId()); } if (diskImage.managedDisk() != null) { builder.append("\n\t\tSource managed disk: ").append(diskImage.managedDisk().id()); } if (diskImage.snapshot() != null) { builder.append("\n\t\tSource snapshot: ").append(diskImage.snapshot().id()); } if (diskImage.blobUri() != null) { builder.append("\n\t\tSource un-managed vhd: 
").append(diskImage.blobUri()); } } } System.out.println(builder.toString()); } /** * Uploads a file to an Azure app service for Web App. * * @param profile the publishing profile for the app service. * @param fileName the name of the file on server * @param file the local file */ public static void uploadFileViaFtp(PublishingProfile profile, String fileName, InputStream file) { String path = "./site/wwwroot/webapps"; uploadFileViaFtp(profile, fileName, file, path); } /** * Uploads a file to an Azure app service for Function App. * * @param profile the publishing profile for the app service. * @param fileName the name of the file on server * @param file the local file */ public static void uploadFileForFunctionViaFtp(PublishingProfile profile, String fileName, InputStream file) { String path = "./site/wwwroot"; uploadFileViaFtp(profile, fileName, file, path); } private static void uploadFileViaFtp(PublishingProfile profile, String fileName, InputStream file, String path) { FTPClient ftpClient = new FTPClient(); String[] ftpUrlSegments = profile.ftpUrl().split("/", 2); String server = ftpUrlSegments[0]; if (fileName.contains("/")) { int lastslash = fileName.lastIndexOf('/'); path = path + "/" + fileName.substring(0, lastslash); fileName = fileName.substring(lastslash + 1); } try { ftpClient.connect(server); ftpClient.enterLocalPassiveMode(); ftpClient.login(profile.ftpUsername(), profile.ftpPassword()); ftpClient.setFileType(FTP.BINARY_FILE_TYPE); for (String segment : path.split("/")) { if (!ftpClient.changeWorkingDirectory(segment)) { ftpClient.makeDirectory(segment); ftpClient.changeWorkingDirectory(segment); } } ftpClient.storeFile(fileName, file); ftpClient.disconnect(); } catch (IOException e) { e.printStackTrace(); } } /** * Print service bus namespace info. 
 *
 * @param serviceBusNamespace a service bus namespace
 */
public static void print(ServiceBusNamespace serviceBusNamespace) {
    // Samples-only helper: writes a human-readable summary to stdout.
    StringBuilder builder = new StringBuilder()
        .append("Service bus Namespace: ").append(serviceBusNamespace.id())
        .append("\n\tName: ").append(serviceBusNamespace.name())
        .append("\n\tRegion: ").append(serviceBusNamespace.regionName())
        .append("\n\tResourceGroupName: ").append(serviceBusNamespace.resourceGroupName())
        .append("\n\tCreatedAt: ").append(serviceBusNamespace.createdAt())
        .append("\n\tUpdatedAt: ").append(serviceBusNamespace.updatedAt())
        .append("\n\tDnsLabel: ").append(serviceBusNamespace.dnsLabel())
        .append("\n\tFQDN: ").append(serviceBusNamespace.fqdn())
        .append("\n\tSku: ")
        .append("\n\t\tCapacity: ").append(serviceBusNamespace.sku().capacity())
        .append("\n\t\tSkuName: ").append(serviceBusNamespace.sku().name())
        .append("\n\t\tTier: ").append(serviceBusNamespace.sku().tier());
    System.out.println(builder.toString());
}

/**
 * Print service bus queue info.
 *
 * @param queue a service bus queue
 */
public static void print(Queue queue) {
    StringBuilder builder = new StringBuilder()
        .append("Service bus Queue: ").append(queue.id())
        .append("\n\tName: ").append(queue.name())
        .append("\n\tResourceGroupName: ").append(queue.resourceGroupName())
        .append("\n\tCreatedAt: ").append(queue.createdAt())
        .append("\n\tUpdatedAt: ").append(queue.updatedAt())
        .append("\n\tAccessedAt: ").append(queue.accessedAt())
        .append("\n\tActiveMessageCount: ").append(queue.activeMessageCount())
        .append("\n\tCurrentSizeInBytes: ").append(queue.currentSizeInBytes())
        .append("\n\tDeadLetterMessageCount: ").append(queue.deadLetterMessageCount())
        .append("\n\tDefaultMessageTtlDuration: ").append(queue.defaultMessageTtlDuration())
        .append("\n\tDuplicateMessageDetectionHistoryDuration: ").append(queue.duplicateMessageDetectionHistoryDuration())
        .append("\n\tIsBatchedOperationsEnabled: ").append(queue.isBatchedOperationsEnabled())
        .append("\n\tIsDeadLetteringEnabledForExpiredMessages: ").append(queue.isDeadLetteringEnabledForExpiredMessages())
        .append("\n\tIsDuplicateDetectionEnabled: ").append(queue.isDuplicateDetectionEnabled())
        .append("\n\tIsExpressEnabled: ").append(queue.isExpressEnabled())
        .append("\n\tIsPartitioningEnabled: ").append(queue.isPartitioningEnabled())
        .append("\n\tIsSessionEnabled: ").append(queue.isSessionEnabled())
        .append("\n\tDeleteOnIdleDurationInMinutes: ").append(queue.deleteOnIdleDurationInMinutes())
        .append("\n\tMaxDeliveryCountBeforeDeadLetteringMessage: ").append(queue.maxDeliveryCountBeforeDeadLetteringMessage())
        .append("\n\tMaxSizeInMB: ").append(queue.maxSizeInMB())
        .append("\n\tMessageCount: ").append(queue.messageCount())
        .append("\n\tScheduledMessageCount: ").append(queue.scheduledMessageCount())
        .append("\n\tStatus: ").append(queue.status())
        .append("\n\tTransferMessageCount: ").append(queue.transferMessageCount())
        .append("\n\tLockDurationInSeconds: ").append(queue.lockDurationInSeconds())
        .append("\n\tTransferDeadLetterMessageCount: ").append(queue.transferDeadLetterMessageCount());
    System.out.println(builder.toString());
}

/**
 * Print service bus queue authorization keys info.
 *
 * @param queueAuthorizationRule a service bus queue authorization keys
 */
public static void print(QueueAuthorizationRule queueAuthorizationRule) {
    StringBuilder builder = new StringBuilder()
        .append("Service bus queue authorization rule: ").append(queueAuthorizationRule.id())
        .append("\n\tName: ").append(queueAuthorizationRule.name())
        .append("\n\tResourceGroupName: ").append(queueAuthorizationRule.resourceGroupName())
        .append("\n\tNamespace Name: ").append(queueAuthorizationRule.namespaceName())
        .append("\n\tQueue Name: ").append(queueAuthorizationRule.queueName());
    // One entry per granted access right (e.g. Send, Listen, Manage).
    List<com.azure.resourcemanager.servicebus.models.AccessRights> rights = queueAuthorizationRule.rights();
    builder.append("\n\tNumber of access rights in queue: ").append(rights.size());
    for (com.azure.resourcemanager.servicebus.models.AccessRights right : rights) {
        builder.append("\n\t\tAccessRight: ")
            .append("\n\t\t\tName :").append(right.name());
    }
    System.out.println(builder.toString());
}

/**
 * Print service bus namespace authorization keys info.
 * <p>
 * NOTE(review): this prints the shared access keys and connection strings in
 * clear text — suitable for samples/tests only.
 *
 * @param keys a service bus namespace authorization keys
 */
public static void print(AuthorizationKeys keys) {
    StringBuilder builder = new StringBuilder()
        .append("Authorization keys: ")
        .append("\n\tPrimaryKey: ").append(keys.primaryKey())
        .append("\n\tPrimaryConnectionString: ").append(keys.primaryConnectionString())
        .append("\n\tSecondaryKey: ").append(keys.secondaryKey())
        .append("\n\tSecondaryConnectionString: ").append(keys.secondaryConnectionString());
    System.out.println(builder.toString());
}

/**
 * Print service bus namespace authorization rule info.
 *
 * @param namespaceAuthorizationRule a service bus namespace authorization rule
 */
public static void print(NamespaceAuthorizationRule namespaceAuthorizationRule) {
    StringBuilder builder = new StringBuilder()
        .append("Service bus queue authorization rule: ").append(namespaceAuthorizationRule.id())
        .append("\n\tName: ").append(namespaceAuthorizationRule.name())
        .append("\n\tResourceGroupName: ").append(namespaceAuthorizationRule.resourceGroupName())
        .append("\n\tNamespace Name: ").append(namespaceAuthorizationRule.namespaceName());
    // One entry per granted access right (e.g. Send, Listen, Manage).
    List<com.azure.resourcemanager.servicebus.models.AccessRights> rights = namespaceAuthorizationRule.rights();
    builder.append("\n\tNumber of access rights in queue: ").append(rights.size());
    for (com.azure.resourcemanager.servicebus.models.AccessRights right : rights) {
        builder.append("\n\t\tAccessRight: ")
            .append("\n\t\t\tName :").append(right.name());
    }
    System.out.println(builder.toString());
}

/**
 * Print service bus topic info.
 *
 * @param topic a service bus topic
 */
public static void print(Topic topic) {
    StringBuilder builder = new StringBuilder()
        .append("Service bus topic: ").append(topic.id())
        .append("\n\tName: ").append(topic.name())
        .append("\n\tResourceGroupName: ").append(topic.resourceGroupName())
        .append("\n\tCreatedAt: ").append(topic.createdAt())
        .append("\n\tUpdatedAt: ").append(topic.updatedAt())
        .append("\n\tAccessedAt: ").append(topic.accessedAt())
        .append("\n\tActiveMessageCount: ").append(topic.activeMessageCount())
        .append("\n\tCurrentSizeInBytes: ").append(topic.currentSizeInBytes())
        .append("\n\tDeadLetterMessageCount: ").append(topic.deadLetterMessageCount())
        .append("\n\tDefaultMessageTtlDuration: ").append(topic.defaultMessageTtlDuration())
        .append("\n\tDuplicateMessageDetectionHistoryDuration: ").append(topic.duplicateMessageDetectionHistoryDuration())
        .append("\n\tIsBatchedOperationsEnabled: ").append(topic.isBatchedOperationsEnabled())
        .append("\n\tIsDuplicateDetectionEnabled: ").append(topic.isDuplicateDetectionEnabled())
        .append("\n\tIsExpressEnabled: ").append(topic.isExpressEnabled())
        .append("\n\tIsPartitioningEnabled: ").append(topic.isPartitioningEnabled())
        .append("\n\tDeleteOnIdleDurationInMinutes: ").append(topic.deleteOnIdleDurationInMinutes())
        .append("\n\tMaxSizeInMB: ").append(topic.maxSizeInMB())
        .append("\n\tScheduledMessageCount: ").append(topic.scheduledMessageCount())
        .append("\n\tStatus: ").append(topic.status())
        .append("\n\tTransferMessageCount: ").append(topic.transferMessageCount())
        .append("\n\tSubscriptionCount: ").append(topic.subscriptionCount())
        .append("\n\tTransferDeadLetterMessageCount: ").append(topic.transferDeadLetterMessageCount());
    System.out.println(builder.toString());
}

/**
 * Print service bus subscription info.
 *
 * @param serviceBusSubscription a service bus subscription
 */
public static void print(ServiceBusSubscription serviceBusSubscription) {
    // NOTE(review): TransferMessageCount is appended twice below — kept as-is to
    // preserve the existing output; confirm whether the duplicate is intentional.
    StringBuilder builder = new StringBuilder()
        .append("Service bus subscription: ").append(serviceBusSubscription.id())
        .append("\n\tName: ").append(serviceBusSubscription.name())
        .append("\n\tResourceGroupName: ").append(serviceBusSubscription.resourceGroupName())
        .append("\n\tCreatedAt: ").append(serviceBusSubscription.createdAt())
        .append("\n\tUpdatedAt: ").append(serviceBusSubscription.updatedAt())
        .append("\n\tAccessedAt: ").append(serviceBusSubscription.accessedAt())
        .append("\n\tActiveMessageCount: ").append(serviceBusSubscription.activeMessageCount())
        .append("\n\tDeadLetterMessageCount: ").append(serviceBusSubscription.deadLetterMessageCount())
        .append("\n\tDefaultMessageTtlDuration: ").append(serviceBusSubscription.defaultMessageTtlDuration())
        .append("\n\tIsBatchedOperationsEnabled: ").append(serviceBusSubscription.isBatchedOperationsEnabled())
        .append("\n\tDeleteOnIdleDurationInMinutes: ").append(serviceBusSubscription.deleteOnIdleDurationInMinutes())
        .append("\n\tScheduledMessageCount: ").append(serviceBusSubscription.scheduledMessageCount())
        .append("\n\tStatus: ").append(serviceBusSubscription.status())
        .append("\n\tTransferMessageCount: ").append(serviceBusSubscription.transferMessageCount())
        .append("\n\tIsDeadLetteringEnabledForExpiredMessages: ").append(serviceBusSubscription.isDeadLetteringEnabledForExpiredMessages())
        .append("\n\tIsSessionEnabled: ").append(serviceBusSubscription.isSessionEnabled())
        .append("\n\tLockDurationInSeconds: ").append(serviceBusSubscription.lockDurationInSeconds())
        .append("\n\tMaxDeliveryCountBeforeDeadLetteringMessage: ").append(serviceBusSubscription.maxDeliveryCountBeforeDeadLetteringMessage())
        .append("\n\tIsDeadLetteringEnabledForFilterEvaluationFailedMessages: ").append(serviceBusSubscription.isDeadLetteringEnabledForFilterEvaluationFailedMessages())
        .append("\n\tTransferMessageCount: ").append(serviceBusSubscription.transferMessageCount())
        .append("\n\tTransferDeadLetterMessageCount: ").append(serviceBusSubscription.transferDeadLetterMessageCount());
    System.out.println(builder.toString());
}

/**
 * Print topic Authorization Rule info.
* * @param topicAuthorizationRule a topic Authorization Rule */ public static void print(TopicAuthorizationRule topicAuthorizationRule) { StringBuilder builder = new StringBuilder() .append("Service bus topic authorization rule: ").append(topicAuthorizationRule.id()) .append("\n\tName: ").append(topicAuthorizationRule.name()) .append("\n\tResourceGroupName: ").append(topicAuthorizationRule.resourceGroupName()) .append("\n\tNamespace Name: ").append(topicAuthorizationRule.namespaceName()) .append("\n\tTopic Name: ").append(topicAuthorizationRule.topicName()); List<com.azure.resourcemanager.servicebus.models.AccessRights> rights = topicAuthorizationRule.rights(); builder.append("\n\tNumber of access rights in queue: ").append(rights.size()); for (com.azure.resourcemanager.servicebus.models.AccessRights right : rights) { builder.append("\n\t\tAccessRight: ") .append("\n\t\t\tName :").append(right.name()); } System.out.println(builder.toString()); } /** * Print CosmosDB info. * * @param cosmosDBAccount a CosmosDB */ public static void print(CosmosDBAccount cosmosDBAccount) { StringBuilder builder = new StringBuilder() .append("CosmosDB: ").append(cosmosDBAccount.id()) .append("\n\tName: ").append(cosmosDBAccount.name()) .append("\n\tResourceGroupName: ").append(cosmosDBAccount.resourceGroupName()) .append("\n\tKind: ").append(cosmosDBAccount.kind().toString()) .append("\n\tDefault consistency level: ").append(cosmosDBAccount.consistencyPolicy().defaultConsistencyLevel()) .append("\n\tIP range filter: ").append(cosmosDBAccount.ipRangeFilter()); DatabaseAccountListKeysResult keys = cosmosDBAccount.listKeys(); DatabaseAccountListReadOnlyKeysResult readOnlyKeys = cosmosDBAccount.listReadOnlyKeys(); builder .append("\n\tPrimary Master Key: ").append(keys.primaryMasterKey()) .append("\n\tSecondary Master Key: ").append(keys.secondaryMasterKey()) .append("\n\tPrimary Read-Only Key: ").append(readOnlyKeys.primaryReadonlyMasterKey()) .append("\n\tSecondary Read-Only Key: 
").append(readOnlyKeys.secondaryReadonlyMasterKey()); for (Location writeReplica : cosmosDBAccount.writableReplications()) { builder.append("\n\t\tWrite replication: ") .append("\n\t\t\tName :").append(writeReplica.locationName()); } builder.append("\n\tNumber of read replications: ").append(cosmosDBAccount.readableReplications().size()); for (Location readReplica : cosmosDBAccount.readableReplications()) { builder.append("\n\t\tRead replication: ") .append("\n\t\t\tName :").append(readReplica.locationName()); } } /** * Print Active Directory User info. * * @param user active directory user */ public static void print(ActiveDirectoryUser user) { StringBuilder builder = new StringBuilder() .append("Active Directory User: ").append(user.id()) .append("\n\tName: ").append(user.name()) .append("\n\tMail: ").append(user.mail()) .append("\n\tMail Nickname: ").append(user.mailNickname()) .append("\n\tUser Principal Name: ").append(user.userPrincipalName()); System.out.println(builder.toString()); } /** * Print Active Directory User info. 
 *
     * @param role role definition
     */
    public static void print(RoleDefinition role) {
        StringBuilder builder = new StringBuilder()
            .append("Role Definition: ").append(role.id())
            .append("\n\tName: ").append(role.name())
            .append("\n\tRole Name: ").append(role.roleName())
            .append("\n\tType: ").append(role.type())
            .append("\n\tDescription: ").append(role.description())
            // NOTE(review): "Type" is appended a second time here (already emitted above)
            // — looks like a copy-paste leftover; left as-is to preserve output.
            .append("\n\tType: ").append(role.type());
        Set<Permission> permissions = role.permissions();
        builder.append("\n\tPermissions: ").append(permissions.size());
        for (Permission permission : permissions) {
            builder.append("\n\t\tPermission Actions: " + permission.actions().size());
            for (String action : permission.actions()) {
                builder.append("\n\t\t\tName :").append(action);
            }
            builder.append("\n\t\tPermission Not Actions: " + permission.notActions().size());
            for (String notAction : permission.notActions()) {
                builder.append("\n\t\t\tName :").append(notAction);
            }
            builder.append("\n\t\tPermission Data Actions: " + permission.dataActions().size());
            for (String dataActions : permission.dataActions()) {
                builder.append("\n\t\t\tName :").append(dataActions);
            }
            builder.append("\n\t\tPermission Not Data Actions: " + permission.notDataActions().size());
            for (String notDataActions : permission.notDataActions()) {
                builder.append("\n\t\t\tName :").append(notDataActions);
            }
        }
        Set<String> assignableScopes = role.assignableScopes();
        builder.append("\n\tAssignable scopes: ").append(assignableScopes.size());
        for (String scope : assignableScopes) {
            builder.append("\n\t\tAssignable Scope: ")
                .append("\n\t\t\tName :").append(scope);
        }
        System.out.println(builder.toString());
    }

    /**
     * Print Role Assignment info.
 *
     * @param roleAssignment role assignment
     */
    public static void print(RoleAssignment roleAssignment) {
        StringBuilder builder = new StringBuilder()
            .append("Role Assignment: ")
            .append("\n\tScope: ").append(roleAssignment.scope())
            .append("\n\tPrincipal Id: ").append(roleAssignment.principalId())
            .append("\n\tRole Definition Id: ").append(roleAssignment.roleDefinitionId());
        System.out.println(builder.toString());
    }

    /**
     * Print Active Directory Group info.
     *
     * @param group active directory group
     */
    public static void print(ActiveDirectoryGroup group) {
        StringBuilder builder = new StringBuilder()
            .append("Active Directory Group: ").append(group.id())
            .append("\n\tName: ").append(group.name())
            .append("\n\tMail: ").append(group.mail())
            .append("\n\tSecurity Enabled: ").append(group.securityEnabled())
            .append("\n\tGroup members:");
        // NOTE(review): listMembers() presumably issues a directory (Graph) call, so this
        // print is not side-effect free — confirm before using in hot paths.
        for (ActiveDirectoryObject object : group.listMembers()) {
            builder.append("\n\t\tType: ").append(object.getClass().getSimpleName())
                .append("\tName: ").append(object.name());
        }
        System.out.println(builder.toString());
    }

    /**
     * Print Active Directory Application info.
     *
     * @param application active directory application
     */
    public static void print(ActiveDirectoryApplication application) {
        StringBuilder builder = new StringBuilder()
            .append("Active Directory Application: ").append(application.id())
            .append("\n\tName: ").append(application.name())
            .append("\n\tSign on URL: ").append(application.signOnUrl())
            .append("\n\tReply URLs:");
        for (String replyUrl : application.replyUrls()) {
            builder.append("\n\t\t").append(replyUrl);
        }
        System.out.println(builder.toString());
    }

    /**
     * Print Service Principal info.
* * @param servicePrincipal service principal */ public static void print(ServicePrincipal servicePrincipal) { StringBuilder builder = new StringBuilder() .append("Service Principal: ").append(servicePrincipal.id()) .append("\n\tName: ").append(servicePrincipal.name()) .append("\n\tApplication Id: ").append(servicePrincipal.applicationId()); List<String> names = servicePrincipal.servicePrincipalNames(); builder.append("\n\tNames: ").append(names.size()); for (String name : names) { builder.append("\n\t\tName: ").append(name); } System.out.println(builder.toString()); } /** * Print Network Watcher info. * * @param nw network watcher */ public static void print(NetworkWatcher nw) { StringBuilder builder = new StringBuilder() .append("Network Watcher: ").append(nw.id()) .append("\n\tName: ").append(nw.name()) .append("\n\tResource group name: ").append(nw.resourceGroupName()) .append("\n\tRegion name: ").append(nw.regionName()); System.out.println(builder.toString()); } /** * Print packet capture info. 
 *
     * @param resource packet capture
     */
    public static void print(PacketCapture resource) {
        StringBuilder sb = new StringBuilder().append("Packet Capture: ").append(resource.id())
            .append("\n\tName: ").append(resource.name())
            .append("\n\tTarget id: ").append(resource.targetId())
            .append("\n\tTime limit in seconds: ").append(resource.timeLimitInSeconds())
            .append("\n\tBytes to capture per packet: ").append(resource.bytesToCapturePerPacket())
            .append("\n\tProvisioning state: ").append(resource.provisioningState())
            .append("\n\tStorage location:")
            .append("\n\tStorage account id: ").append(resource.storageLocation().storageId())
            .append("\n\tStorage account path: ").append(resource.storageLocation().storagePath())
            .append("\n\tFile path: ").append(resource.storageLocation().filePath())
            .append("\n\t Packet capture filters: ").append(resource.filters().size());
        for (PacketCaptureFilter filter : resource.filters()) {
            sb.append("\n\t\tProtocol: ").append(filter.protocol());
            sb.append("\n\t\tLocal IP address: ").append(filter.localIpAddress());
            sb.append("\n\t\tRemote IP address: ").append(filter.remoteIpAddress());
            sb.append("\n\t\tLocal port: ").append(filter.localPort());
            sb.append("\n\t\tRemote port: ").append(filter.remotePort());
        }
        System.out.println(sb.toString());
    }

    /**
     * Print verification IP flow info.
     *
     * @param resource IP flow verification info
     */
    public static void print(VerificationIPFlow resource) {
        System.out.println(new StringBuilder("IP flow verification: ")
            .append("\n\tAccess: ").append(resource.access())
            .append("\n\tRule name: ").append(resource.ruleName())
            .toString());
    }

    /**
     * Print topology info.
 *
     * @param resource topology
     */
    public static void print(Topology resource) {
        StringBuilder sb = new StringBuilder().append("Topology: ").append(resource.id())
            .append("\n\tTopology parameters: ")
            .append("\n\t\tResource group: ").append(resource.topologyParameters().targetResourceGroupName())
            // Virtual network / subnet may be absent from the topology query; print "" then.
            .append("\n\t\tVirtual network: ").append(resource.topologyParameters().targetVirtualNetwork() == null ? "" : resource.topologyParameters().targetVirtualNetwork().id())
            .append("\n\t\tSubnet id: ").append(resource.topologyParameters().targetSubnet() == null ? "" : resource.topologyParameters().targetSubnet().id())
            .append("\n\tCreated time: ").append(resource.createdTime())
            .append("\n\tLast modified time: ").append(resource.lastModifiedTime());
        for (TopologyResource tr : resource.resources().values()) {
            sb.append("\n\tTopology resource: ").append(tr.id())
                .append("\n\t\tName: ").append(tr.name())
                .append("\n\t\tLocation: ").append(tr.location())
                .append("\n\t\tAssociations:");
            for (TopologyAssociation association : tr.associations()) {
                sb.append("\n\t\t\tName:").append(association.name())
                    .append("\n\t\t\tResource id:").append(association.resourceId())
                    .append("\n\t\t\tAssociation type:").append(association.associationType());
            }
        }
        System.out.println(sb.toString());
    }

    /**
     * Print flow log settings info.
     *
     * @param resource flow log settings
     */
    public static void print(FlowLogSettings resource) {
        System.out.println(new StringBuilder().append("Flow log settings: ")
            .append("Target resource id: ").append(resource.targetResourceId())
            .append("\n\tFlow log enabled: ").append(resource.enabled())
            .append("\n\tStorage account id: ").append(resource.storageId())
            .append("\n\tRetention policy enabled: ").append(resource.isRetentionEnabled())
            .append("\n\tRetention policy days: ").append(resource.retentionDays())
            .toString());
    }

    /**
     * Print security group view info.
 *
     * @param resource a security group view
     */
    public static void print(SecurityGroupView resource) {
        StringBuilder sb = new StringBuilder().append("Security group view: ")
            .append("\n\tVirtual machine id: ").append(resource.vmId());
        for (SecurityGroupNetworkInterface sgni : resource.networkInterfaces().values()) {
            // NOTE(review): the "Security group network interface:" label is printed twice
            // in a row — appears to be a copy-paste artifact; preserved to keep the output.
            sb.append("\n\tSecurity group network interface:").append(sgni.id())
                .append("\n\t\tSecurity group network interface:")
                .append("\n\t\tEffective security rules:");
            for (EffectiveNetworkSecurityRule rule : sgni.securityRuleAssociations().effectiveSecurityRules()) {
                sb.append("\n\t\t\tName: ").append(rule.name())
                    .append("\n\t\t\tDirection: ").append(rule.direction())
                    .append("\n\t\t\tAccess: ").append(rule.access())
                    .append("\n\t\t\tPriority: ").append(rule.priority())
                    .append("\n\t\t\tSource address prefix: ").append(rule.sourceAddressPrefix())
                    .append("\n\t\t\tSource port range: ").append(rule.sourcePortRange())
                    .append("\n\t\t\tDestination address prefix: ").append(rule.destinationAddressPrefix())
                    .append("\n\t\t\tDestination port range: ").append(rule.destinationPortRange())
                    .append("\n\t\t\tProtocol: ").append(rule.protocol());
            }
            // NOTE(review): subnetAssociation() is dereferenced without a null check (unlike
            // networkInterfaceAssociation() below) — confirm it is always present.
            sb.append("\n\t\tSubnet:").append(sgni.securityRuleAssociations().subnetAssociation().id());
            printSecurityRule(sb, sgni.securityRuleAssociations().subnetAssociation().securityRules());
            if (sgni.securityRuleAssociations().networkInterfaceAssociation() != null) {
                sb.append("\n\t\tNetwork interface:").append(sgni.securityRuleAssociations().networkInterfaceAssociation().id());
                printSecurityRule(sb, sgni.securityRuleAssociations().networkInterfaceAssociation().securityRules());
            }
            sb.append("\n\t\tDefault security rules:");
            printSecurityRule(sb, sgni.securityRuleAssociations().defaultSecurityRules());
        }
        System.out.println(sb.toString());
    }

    // Appends one formatted line group per security rule to the shared builder.
    private static void printSecurityRule(StringBuilder sb, List<SecurityRuleInner> rules) {
        for (SecurityRuleInner rule : rules) {
            sb.append("\n\t\t\tName: ").append(rule.name())
                .append("\n\t\t\tDirection: ").append(rule.direction())
                .append("\n\t\t\tAccess: ").append(rule.access())
                .append("\n\t\t\tPriority: ").append(rule.priority())
                .append("\n\t\t\tSource address prefix: ").append(rule.sourceAddressPrefix())
                .append("\n\t\t\tSource port range: ").append(rule.sourcePortRange())
                .append("\n\t\t\tDestination address prefix: ").append(rule.destinationAddressPrefix())
                .append("\n\t\t\tDestination port range: ").append(rule.destinationPortRange())
                .append("\n\t\t\tProtocol: ").append(rule.protocol())
                .append("\n\t\t\tDescription: ").append(rule.description())
                .append("\n\t\t\tProvisioning state: ").append(rule.provisioningState());
        }
    }

    /**
     * Print next hop info.
     *
     * @param resource next hop
     */
    public static void print(NextHop resource) {
        System.out.println(new StringBuilder("Next hop: ")
            .append("Next hop type: ").append(resource.nextHopType())
            .append("\n\tNext hop ip address: ").append(resource.nextHopIpAddress())
            .append("\n\tRoute table id: ").append(resource.routeTableId())
            .toString());
    }

    /**
     * Print container group info.
* * @param resource a container group */ public static void print(ContainerGroup resource) { StringBuilder info = new StringBuilder().append("Container Group: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tOS type: ").append(resource.osType()); if (resource.ipAddress() != null) { info.append("\n\tPublic IP address: ").append(resource.ipAddress()); } if (resource.externalTcpPorts() != null) { info.append("\n\tExternal TCP ports:"); for (int port : resource.externalTcpPorts()) { info.append(" ").append(port); } } if (resource.externalUdpPorts() != null) { info.append("\n\tExternal UDP ports:"); for (int port : resource.externalUdpPorts()) { info.append(" ").append(port); } } if (resource.imageRegistryServers() != null) { info.append("\n\tPrivate Docker image registries:"); for (String server : resource.imageRegistryServers()) { info.append(" ").append(server); } } if (resource.volumes() != null) { info.append("\n\tVolume mapping: "); for (Map.Entry<String, Volume> entry : resource.volumes().entrySet()) { info.append("\n\t\tName: ").append(entry.getKey()).append(" -> ") .append(entry.getValue().azureFile() != null ? 
entry.getValue().azureFile().shareName() : "empty direcory volume"); } } if (resource.containers() != null) { info.append("\n\tContainer instances: "); for (Map.Entry<String, Container> entry : resource.containers().entrySet()) { Container container = entry.getValue(); info.append("\n\t\tName: ").append(entry.getKey()).append(" -> ").append(container.image()); info.append("\n\t\t\tResources: "); info.append(container.resources().requests().cpu()).append("CPUs "); info.append(container.resources().requests().memoryInGB()).append("GB"); info.append("\n\t\t\tPorts:"); for (ContainerPort port : container.ports()) { info.append(" ").append(port.port()); } if (container.volumeMounts() != null) { info.append("\n\t\t\tVolume mounts:"); for (VolumeMount volumeMount : container.volumeMounts()) { info.append(" ").append(volumeMount.name()).append("->").append(volumeMount.mountPath()); } } if (container.command() != null) { info.append("\n\t\t\tStart commands:"); for (String command : container.command()) { info.append("\n\t\t\t\t").append(command); } } if (container.environmentVariables() != null) { info.append("\n\t\t\tENV vars:"); for (EnvironmentVariable envVar : container.environmentVariables()) { info.append("\n\t\t\t\t").append(envVar.name()).append("=").append(envVar.value()); } } } } System.out.println(info.toString()); } /** * Print event hub namespace. 
 *
     * @param resource an event hub namespace
     */
    public static void print(EventHubNamespace resource) {
        StringBuilder info = new StringBuilder();
        info.append("Eventhub Namespace: ").append(resource.id())
            .append("\n\tName: ").append(resource.name())
            .append("\n\tRegion: ").append(resource.region())
            .append("\n\tTags: ").append(resource.tags())
            .append("\n\tAzureInsightMetricId: ").append(resource.azureInsightMetricId())
            .append("\n\tIsAutoScale enabled: ").append(resource.isAutoScaleEnabled())
            .append("\n\tServiceBus endpoint: ").append(resource.serviceBusEndpoint())
            .append("\n\tThroughPut upper limit: ").append(resource.throughputUnitsUpperLimit())
            .append("\n\tCurrent ThroughPut: ").append(resource.currentThroughputUnits())
            .append("\n\tCreated time: ").append(resource.createdAt())
            .append("\n\tUpdated time: ").append(resource.updatedAt());
        System.out.println(info.toString());
    }

    /**
     * Print event hub.
     *
     * @param resource event hub
     */
    public static void print(EventHub resource) {
        StringBuilder info = new StringBuilder();
        info.append("Eventhub: ").append(resource.id())
            .append("\n\tName: ").append(resource.name())
            .append("\n\tNamespace resource group: ").append(resource.namespaceResourceGroupName())
            .append("\n\tNamespace: ").append(resource.namespaceName())
            .append("\n\tIs data capture enabled: ").append(resource.isDataCaptureEnabled())
            .append("\n\tPartition ids: ").append(resource.partitionIds());
        // Capture details are only meaningful when data capture is turned on.
        if (resource.isDataCaptureEnabled()) {
            info.append("\n\t\t\tData capture window size in MB: ").append(resource.dataCaptureWindowSizeInMB());
            info.append("\n\t\t\tData capture window size in seconds: ").append(resource.dataCaptureWindowSizeInSeconds());
            if (resource.captureDestination() != null) {
                info.append("\n\t\t\tData capture storage account: ").append(resource.captureDestination().storageAccountResourceId());
                info.append("\n\t\t\tData capture storage container: ").append(resource.captureDestination().blobContainer());
            }
        }
        System.out.println(info.toString());
    }

    /**
     *
 Print event hub namespace recovery pairing.
     *
     * @param resource event hub namespace disaster recovery pairing
     */
    public static void print(EventHubDisasterRecoveryPairing resource) {
        StringBuilder info = new StringBuilder();
        info.append("DisasterRecoveryPairing: ").append(resource.id())
            .append("\n\tName: ").append(resource.name())
            .append("\n\tPrimary namespace resource group name: ").append(resource.primaryNamespaceResourceGroupName())
            .append("\n\tPrimary namespace name: ").append(resource.primaryNamespaceName())
            .append("\n\tSecondary namespace: ").append(resource.secondaryNamespaceId())
            .append("\n\tNamespace role: ").append(resource.namespaceRole());
        System.out.println(info.toString());
    }

    /**
     * Print event hub namespace recovery pairing auth rules.
     *
     * @param resource event hub namespace disaster recovery pairing auth rule
     */
    public static void print(DisasterRecoveryPairingAuthorizationRule resource) {
        StringBuilder info = new StringBuilder();
        info.append("DisasterRecoveryPairing auth rule: ").append(resource.name());
        // Collect the rights as strings so the list prints with List.toString() formatting.
        List<String> rightsStr = new ArrayList<>();
        for (AccessRights rights : resource.rights()) {
            rightsStr.add(rights.toString());
        }
        info.append("\n\tRights: ").append(rightsStr);
        System.out.println(info.toString());
    }

    /**
     * Print event hub namespace recovery pairing auth rule key.
 *
     * @param resource event hub namespace disaster recovery pairing auth rule key
     */
    public static void print(DisasterRecoveryPairingAuthorizationKey resource) {
        // NOTE(review): keys and connection strings are written to stdout —
        // sample-only behavior; never log secrets like this in production code.
        StringBuilder info = new StringBuilder();
        info.append("DisasterRecoveryPairing auth key: ")
            .append("\n\t Alias primary connection string: ").append(resource.aliasPrimaryConnectionString())
            .append("\n\t Alias secondary connection string: ").append(resource.aliasSecondaryConnectionString())
            .append("\n\t Primary key: ").append(resource.primaryKey())
            .append("\n\t Secondary key: ").append(resource.secondaryKey())
            .append("\n\t Primary connection string: ").append(resource.primaryConnectionString())
            .append("\n\t Secondary connection string: ").append(resource.secondaryConnectionString());
        System.out.println(info.toString());
    }

    /**
     * Print event hub consumer group.
     *
     * @param resource event hub consumer group
     */
    public static void print(EventHubConsumerGroup resource) {
        StringBuilder info = new StringBuilder();
        info.append("Event hub consumer group: ").append(resource.id())
            .append("\n\tName: ").append(resource.name())
            .append("\n\tNamespace resource group: ").append(resource.namespaceResourceGroupName())
            .append("\n\tNamespace: ").append(resource.namespaceName())
            .append("\n\tEvent hub name: ").append(resource.eventHubName())
            .append("\n\tUser metadata: ").append(resource.userMetadata());
        System.out.println(info.toString());
    }

    /**
     * Print Diagnostic Setting.
* * @param resource Diagnostic Setting instance */ public static void print(DiagnosticSetting resource) { StringBuilder info = new StringBuilder("Diagnostic Setting: ") .append("\n\tId: ").append(resource.id()) .append("\n\tAssociated resource Id: ").append(resource.resourceId()) .append("\n\tName: ").append(resource.name()) .append("\n\tStorage Account Id: ").append(resource.storageAccountId()) .append("\n\tEventHub Namespace Autorization Rule Id: ").append(resource.eventHubAuthorizationRuleId()) .append("\n\tEventHub name: ").append(resource.eventHubName()) .append("\n\tLog Analytics workspace Id: ").append(resource.workspaceId()); if (resource.logs() != null && !resource.logs().isEmpty()) { info.append("\n\tLog Settings: "); for (LogSettings ls : resource.logs()) { info.append("\n\t\tCategory: ").append(ls.category()); info.append("\n\t\tRetention policy: "); if (ls.retentionPolicy() != null) { info.append(ls.retentionPolicy().days() + " days"); } else { info.append("NONE"); } } } if (resource.metrics() != null && !resource.metrics().isEmpty()) { info.append("\n\tMetric Settings: "); for (MetricSettings ls : resource.metrics()) { info.append("\n\t\tCategory: ").append(ls.category()); info.append("\n\t\tTimegrain: ").append(ls.timeGrain()); info.append("\n\t\tRetention policy: "); if (ls.retentionPolicy() != null) { info.append(ls.retentionPolicy().days() + " days"); } else { info.append("NONE"); } } } System.out.println(info.toString()); } /** * Print Action group settings. 
 *
     * @param actionGroup action group instance
     */
    public static void print(ActionGroup actionGroup) {
        StringBuilder info = new StringBuilder("Action Group: ")
            .append("\n\tId: ").append(actionGroup.id())
            .append("\n\tName: ").append(actionGroup.name())
            .append("\n\tShort Name: ").append(actionGroup.shortName());
        // Each receiver category is emitted only when present and non-empty;
        // "===" lines separate individual receivers within a category.
        if (actionGroup.emailReceivers() != null && !actionGroup.emailReceivers().isEmpty()) {
            info.append("\n\tEmail receivers: ");
            for (EmailReceiver er : actionGroup.emailReceivers()) {
                info.append("\n\t\tName: ").append(er.name());
                info.append("\n\t\tEMail: ").append(er.emailAddress());
                info.append("\n\t\tStatus: ").append(er.status());
                info.append("\n\t\t===");
            }
        }
        if (actionGroup.smsReceivers() != null && !actionGroup.smsReceivers().isEmpty()) {
            info.append("\n\tSMS text message receivers: ");
            for (SmsReceiver er : actionGroup.smsReceivers()) {
                info.append("\n\t\tName: ").append(er.name());
                info.append("\n\t\tPhone: ").append(er.countryCode() + er.phoneNumber());
                info.append("\n\t\tStatus: ").append(er.status());
                info.append("\n\t\t===");
            }
        }
        if (actionGroup.webhookReceivers() != null && !actionGroup.webhookReceivers().isEmpty()) {
            info.append("\n\tWebhook receivers: ");
            for (WebhookReceiver er : actionGroup.webhookReceivers()) {
                info.append("\n\t\tName: ").append(er.name());
                info.append("\n\t\tURI: ").append(er.serviceUri());
                info.append("\n\t\t===");
            }
        }
        if (actionGroup.pushNotificationReceivers() != null && !actionGroup.pushNotificationReceivers().isEmpty()) {
            info.append("\n\tApp Push Notification receivers: ");
            for (AzureAppPushReceiver er : actionGroup.pushNotificationReceivers()) {
                info.append("\n\t\tName: ").append(er.name());
                info.append("\n\t\tEmail: ").append(er.emailAddress());
                info.append("\n\t\t===");
            }
        }
        if (actionGroup.voiceReceivers() != null && !actionGroup.voiceReceivers().isEmpty()) {
            info.append("\n\tVoice Message receivers: ");
            for (VoiceReceiver er : actionGroup.voiceReceivers()) {
                info.append("\n\t\tName: ").append(er.name());
                info.append("\n\t\tPhone: ").append(er.countryCode() + er.phoneNumber());
                info.append("\n\t\t===");
            }
        }
        if (actionGroup.automationRunbookReceivers() != null && !actionGroup.automationRunbookReceivers().isEmpty()) {
            info.append("\n\tAutomation Runbook receivers: ");
            for (AutomationRunbookReceiver er : actionGroup.automationRunbookReceivers()) {
                info.append("\n\t\tName: ").append(er.name());
                info.append("\n\t\tRunbook Name: ").append(er.runbookName());
                info.append("\n\t\tAccount Id: ").append(er.automationAccountId());
                info.append("\n\t\tIs Global: ").append(er.isGlobalRunbook());
                info.append("\n\t\tService URI: ").append(er.serviceUri());
                info.append("\n\t\tWebhook resource Id: ").append(er.webhookResourceId());
                info.append("\n\t\t===");
            }
        }
        if (actionGroup.azureFunctionReceivers() != null && !actionGroup.azureFunctionReceivers().isEmpty()) {
            info.append("\n\tAzure Functions receivers: ");
            for (AzureFunctionReceiver er : actionGroup.azureFunctionReceivers()) {
                info.append("\n\t\tName: ").append(er.name());
                info.append("\n\t\tFunction Name: ").append(er.functionName());
                info.append("\n\t\tFunction App Resource Id: ").append(er.functionAppResourceId());
                info.append("\n\t\tFunction Trigger URI: ").append(er.httpTriggerUrl());
                info.append("\n\t\t===");
            }
        }
        if (actionGroup.logicAppReceivers() != null && !actionGroup.logicAppReceivers().isEmpty()) {
            info.append("\n\tLogic App receivers: ");
            for (LogicAppReceiver er : actionGroup.logicAppReceivers()) {
                info.append("\n\t\tName: ").append(er.name());
                info.append("\n\t\tResource Id: ").append(er.resourceId());
                info.append("\n\t\tCallback URL: ").append(er.callbackUrl());
                info.append("\n\t\t===");
            }
        }
        if (actionGroup.itsmReceivers() != null && !actionGroup.itsmReceivers().isEmpty()) {
            info.append("\n\tITSM receivers: ");
            for (ItsmReceiver er : actionGroup.itsmReceivers()) {
                info.append("\n\t\tName: ").append(er.name());
                info.append("\n\t\tWorkspace Id: ").append(er.workspaceId());
                info.append("\n\t\tConnection Id: ").append(er.connectionId());
                info.append("\n\t\tRegion: ").append(er.region());
                info.append("\n\t\tTicket Configuration: ").append(er.ticketConfiguration());
                info.append("\n\t\t===");
            }
        }
        System.out.println(info.toString());
    }

    /**
     * Print activity log alert settings.
     *
     * @param activityLogAlert activity log instance
     */
    public static void print(ActivityLogAlert activityLogAlert) {
        StringBuilder info = new StringBuilder("Activity Log Alert: ")
            .append("\n\tId: ").append(activityLogAlert.id())
            .append("\n\tName: ").append(activityLogAlert.name())
            .append("\n\tDescription: ").append(activityLogAlert.description())
            .append("\n\tIs Enabled: ").append(activityLogAlert.enabled());
        if (activityLogAlert.scopes() != null && !activityLogAlert.scopes().isEmpty()) {
            info.append("\n\tScopes: ");
            for (String er : activityLogAlert.scopes()) {
                info.append("\n\t\tId: ").append(er);
            }
        }
        if (activityLogAlert.actionGroupIds() != null && !activityLogAlert.actionGroupIds().isEmpty()) {
            info.append("\n\tAction Groups: ");
            for (String er : activityLogAlert.actionGroupIds()) {
                info.append("\n\t\tAction Group Id: ").append(er);
            }
        }
        if (activityLogAlert.equalsConditions() != null && !activityLogAlert.equalsConditions().isEmpty()) {
            info.append("\n\tAlert conditions (when all of is true): ");
            for (Map.Entry<String, String> er : activityLogAlert.equalsConditions().entrySet()) {
                info.append("\n\t\t'").append(er.getKey()).append("' equals '").append(er.getValue()).append("'");
            }
        }
        System.out.println(info.toString());
    }

    /**
     * Print metric alert settings.
 *
     * @param metricAlert metric alert instance
     */
    public static void print(MetricAlert metricAlert) {
        StringBuilder info = new StringBuilder("Metric Alert: ")
            .append("\n\tId: ").append(metricAlert.id())
            .append("\n\tName: ").append(metricAlert.name())
            .append("\n\tDescription: ").append(metricAlert.description())
            .append("\n\tIs Enabled: ").append(metricAlert.enabled())
            .append("\n\tIs Auto Mitigated: ").append(metricAlert.autoMitigate())
            .append("\n\tSeverity: ").append(metricAlert.severity())
            .append("\n\tWindow Size: ").append(metricAlert.windowSize())
            .append("\n\tEvaluation Frequency: ").append(metricAlert.evaluationFrequency());
        if (metricAlert.scopes() != null && !metricAlert.scopes().isEmpty()) {
            info.append("\n\tScopes: ");
            for (String er : metricAlert.scopes()) {
                info.append("\n\t\tId: ").append(er);
            }
        }
        if (metricAlert.actionGroupIds() != null && !metricAlert.actionGroupIds().isEmpty()) {
            info.append("\n\tAction Groups: ");
            for (String er : metricAlert.actionGroupIds()) {
                info.append("\n\t\tAction Group Id: ").append(er);
            }
        }
        if (metricAlert.alertCriterias() != null && !metricAlert.alertCriterias().isEmpty()) {
            info.append("\n\tAlert conditions (when all of is true): ");
            for (Map.Entry<String, MetricAlertCondition> er : metricAlert.alertCriterias().entrySet()) {
                MetricAlertCondition alertCondition = er.getValue();
                info.append("\n\t\tCondition name: ").append(er.getKey())
                    .append("\n\t\tSignal name: ").append(alertCondition.metricName())
                    .append("\n\t\tMetric Namespace: ").append(alertCondition.metricNamespace())
                    .append("\n\t\tOperator: ").append(alertCondition.condition())
                    .append("\n\t\tThreshold: ").append(alertCondition.threshold())
                    .append("\n\t\tTime Aggregation: ").append(alertCondition.timeAggregation());
                if (alertCondition.dimensions() != null && !alertCondition.dimensions().isEmpty()) {
                    for (MetricDimension dimon : alertCondition.dimensions()) {
                        // NOTE(review): the operator is hard-coded as "[Include]" in the output;
                        // if MetricDimension carries an operator, it is not reflected here — verify.
                        info.append("\n\t\tDimension Filter: ").append("Name [").append(dimon.name()).append("] operator [Include] values[");
                        for (String vals : dimon.values()) {
                            info.append(vals).append(", ");
                        }
                        info.append("]");
                    }
                }
            }
        }
        System.out.println(info.toString());
    }

    /**
     * Print spring service settings.
     *
     * @param springService spring service instance
     */
    public static void print(SpringService springService) {
        StringBuilder info = new StringBuilder("Spring Service: ")
            .append("\n\tId: ").append(springService.id())
            .append("\n\tName: ").append(springService.name())
            .append("\n\tResource Group: ").append(springService.resourceGroupName())
            .append("\n\tRegion: ").append(springService.region())
            .append("\n\tTags: ").append(springService.tags());
        ConfigServerProperties serverProperties = springService.getServerProperties();
        // Config-server details are shown only once provisioning has SUCCEEDED.
        if (serverProperties != null && serverProperties.provisioningState() != null
            && serverProperties.provisioningState().equals(ConfigServerState.SUCCEEDED)
            && serverProperties.configServer() != null) {
            info.append("\n\tProperties: ");
            if (serverProperties.configServer().gitProperty() != null) {
                info.append("\n\t\tGit: ").append(serverProperties.configServer().gitProperty().uri());
            }
        }
        if (springService.sku() != null) {
            info.append("\n\tSku: ")
                .append("\n\t\tName: ").append(springService.sku().name())
                .append("\n\t\tTier: ").append(springService.sku().tier())
                .append("\n\t\tCapacity: ").append(springService.sku().capacity());
        }
        MonitoringSettingProperties monitoringSettingProperties = springService.getMonitoringSetting();
        if (monitoringSettingProperties != null && monitoringSettingProperties.provisioningState() != null
            && monitoringSettingProperties.provisioningState().equals(MonitoringSettingState.SUCCEEDED)) {
            info.append("\n\tTrace: ")
                .append("\n\t\tEnabled: ").append(monitoringSettingProperties.traceEnabled())
                .append("\n\t\tApp Insight Instrumentation Key: ").append(monitoringSettingProperties.appInsightsInstrumentationKey());
        }
        System.out.println(info.toString());
    }

    /**
     * Print spring app settings.
* * @param springApp spring app instance */ public static void print(SpringApp springApp) { StringBuilder info = new StringBuilder("Spring Service: ") .append("\n\tId: ").append(springApp.id()) .append("\n\tName: ").append(springApp.name()) .append("\n\tPublic Endpoint: ").append(springApp.isPublic()) .append("\n\tUrl: ").append(springApp.url()) .append("\n\tHttps Only: ").append(springApp.isHttpsOnly()) .append("\n\tFully Qualified Domain Name: ").append(springApp.fqdn()) .append("\n\tActive Deployment Name: ").append(springApp.activeDeploymentName()); if (springApp.temporaryDisk() != null) { info.append("\n\tTemporary Disk:") .append("\n\t\tSize In GB: ").append(springApp.temporaryDisk().sizeInGB()) .append("\n\t\tMount Path: ").append(springApp.temporaryDisk().mountPath()); } if (springApp.persistentDisk() != null) { info.append("\n\tPersistent Disk:") .append("\n\t\tSize In GB: ").append(springApp.persistentDisk().sizeInGB()) .append("\n\t\tMount Path: ").append(springApp.persistentDisk().mountPath()); } if (springApp.identity() != null) { info.append("\n\tIdentity:") .append("\n\t\tType: ").append(springApp.identity().type()) .append("\n\t\tPrincipal Id: ").append(springApp.identity().principalId()) .append("\n\t\tTenant Id: ").append(springApp.identity().tenantId()); } System.out.println(info.toString()); } /** * Print private link resource. * * @param privateLinkResource the private link resource */ public static void print(PrivateLinkResource privateLinkResource) { StringBuilder info = new StringBuilder("Private Link Resource: ") .append("\n\tGroup ID: ").append(privateLinkResource.groupId()) .append("\n\tRequired Member Names: ").append(privateLinkResource.requiredMemberNames()) .append("\n\tRequired DNS Zone Names: ").append(privateLinkResource.requiredDnsZoneNames()); System.out.println(info); } /** * Print private endpoint. 
* * @param privateEndpoint the private endpoint */ public static void print(PrivateEndpoint privateEndpoint) { StringBuilder info = new StringBuilder("Private Endpoint: ") .append("\n\tId: ").append(privateEndpoint.id()) .append("\n\tName: ").append(privateEndpoint.name()); if (privateEndpoint.privateLinkServiceConnections() != null && !privateEndpoint.privateLinkServiceConnections().isEmpty()) { for (PrivateEndpoint.PrivateLinkServiceConnection connection : privateEndpoint.privateLinkServiceConnections().values()) { info .append("\n\t\tPrivate Link Service Connection Name: ").append(connection.name()) .append("\n\t\tPrivate Link Resource ID: ").append(connection.privateLinkResourceId()) .append("\n\t\tSub Resource Names: ").append(connection.subResourceNames()) .append("\n\t\tProvision Status: ").append(connection.state().status()); } } if (privateEndpoint.privateLinkServiceConnections() != null && !privateEndpoint.privateLinkServiceConnections().isEmpty()) { info.append("\n\tPrivate Link Service Connections:"); for (PrivateEndpoint.PrivateLinkServiceConnection connection : privateEndpoint.privateLinkServiceConnections().values()) { info .append("\n\t\tName: ").append(connection.name()) .append("\n\t\tPrivate Link Resource ID: ").append(connection.privateLinkResourceId()) .append("\n\t\tSub Resource Names: ").append(connection.subResourceNames()) .append("\n\t\tStatus: ").append(connection.state().status()); } } if (privateEndpoint.customDnsConfigurations() != null && !privateEndpoint.customDnsConfigurations().isEmpty()) { info.append("\n\tCustom DNS Configure:"); for (CustomDnsConfigPropertiesFormat customDns : privateEndpoint.customDnsConfigurations()) { info .append("\n\t\tFQDN: ").append(customDns.fqdn()) .append("\n\t\tIP Address: ").append(customDns.ipAddresses()); } } System.out.println(info); } /** * Sends a GET request to target URL. * <p> * Retry logic tuned for AppService. * The method does not handle 301 redirect. * * @param urlString the target URL. 
* @return Content of the HTTP response.
 */
public static String sendGetRequest(String urlString) {
    HttpRequest request = new HttpRequest(HttpMethod.GET, urlString);
    // GET is idempotent, so an HTTP 503 is retried in addition to timeouts.
    Response<String> ret = sendWithRetry(request, true, "GET", urlString).block();
    return ret == null ? null : ret.getValue();
}

/**
 * Sends a POST request to target URL.
 * <p>
 * Retry logic tuned for AppService.
 *
 * @param urlString the target URL.
 * @param body the request body.
 * @return Content of the HTTP response, or {@code null} if the request failed.
 */
public static String sendPostRequest(String urlString, String body) {
    try {
        HttpRequest request = new HttpRequest(HttpMethod.POST, urlString).setBody(body);
        // POST is not idempotent: only timeouts are retried (no 503 retry).
        Response<String> ret = sendWithRetry(request, false, "POST", urlString).block();
        return ret == null ? null : ret.getValue();
    } catch (Exception e) {
        LOGGER.logThrowableAsError(e);
        return null;
    }
}

/**
 * Shared send path for {@link #sendGetRequest(String)} and
 * {@link #sendPostRequest(String, String)}: sends the request through the
 * pipeline, treats 200/400/404 as terminal, and retries up to 5 times with a
 * fixed 30-second delay on timeouts (and, optionally, on HTTP 503).
 *
 * @param request the request to send.
 * @param retryOn503 whether an HTTP 503 response should also be retried.
 * @param method HTTP method name, used only in the retry log message.
 * @param urlString the target URL, used only in the retry log message.
 * @return the response with its body read as a string.
 */
private static Mono<Response<String>> sendWithRetry(HttpRequest request, boolean retryOn503, String method, String urlString) {
    return stringResponse(HTTP_PIPELINE.send(request)
        .flatMap(httpResponse -> {
            int code = httpResponse.getStatusCode();
            if (code == 200 || code == 400 || code == 404) {
                return Mono.just(httpResponse);
            } else {
                return Mono.error(new HttpResponseException(httpResponse));
            }
        })
        .retryWhen(Retry
            .fixedDelay(5, Duration.ofSeconds(30))
            .filter(t -> {
                boolean retry = false;
                if (t instanceof TimeoutException) {
                    retry = true;
                } else if (retryOn503
                    && t instanceof HttpResponseException
                    && ((HttpResponseException) t).getResponse().getStatusCode() == 503) {
                    retry = true;
                }
                if (retry) {
                    LOGGER.info("retry {} request to {}", method, urlString);
                }
                return retry;
            })));
}

// Reads the response body fully and repackages it as a Response<String>.
private static Mono<Response<String>> stringResponse(Mono<HttpResponse> responseMono) {
    return responseMono.flatMap(response -> response.getBodyAsString()
        .map(str -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), str)));
}

// Pipeline shared by the helpers above; honors Retry-After (seconds) and logs at BASIC level.
private static final HttpPipeline HTTP_PIPELINE = new HttpPipelineBuilder()
    .policies(
        new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC)),
        new RetryPolicy("Retry-After", ChronoUnit.SECONDS))
    .build();

/**
 * Get the size of the iterable.
 *
 * @param iterable iterable to count size
 * @param <T> generic type parameter of the iterable
 * @return size of the iterable
 */
public static <T> int getSize(Iterable<T> iterable) {
    int res = 0;
    for (T ignored : iterable) {
        res++;
    }
    return res;
}
}
class Utils {

    private static final ClientLogger LOGGER = new ClientLogger(Utils.class);

    // Lazily generated, cached SSH public key; see sshPublicKey().
    // NOTE(review): initialization is not thread-safe; acceptable for samples.
    private static String sshPublicKey;

    private Utils() {
    }

    /**
     * @return a generated password
     */
    public static String password() {
        String password = new ResourceManagerUtils.InternalRuntimeContext().randomResourceName("Pa5$", 12);
        System.out.printf("Password: %s%n", password);
        return password;
    }

    /**
     * Generates (once) and returns an OpenSSH-format RSA public key.
     *
     * @return an SSH public key
     */
    public static String sshPublicKey() {
        if (sshPublicKey == null) {
            try {
                KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA");
                // FIX: was 1024; 1024-bit RSA is deprecated (NIST SP 800-57)
                // and rejected by most services that accept SSH keys.
                keyGen.initialize(2048);
                KeyPair pair = keyGen.generateKeyPair();
                PublicKey publicKey = pair.getPublic();
                RSAPublicKey rsaPublicKey = (RSAPublicKey) publicKey;
                // Serialize to the OpenSSH wire format: length-prefixed
                // "ssh-rsa", public exponent and modulus, then base64-encode.
                ByteArrayOutputStream byteOs = new ByteArrayOutputStream();
                DataOutputStream dos = new DataOutputStream(byteOs);
                dos.writeInt("ssh-rsa".getBytes(StandardCharsets.US_ASCII).length);
                dos.write("ssh-rsa".getBytes(StandardCharsets.US_ASCII));
                dos.writeInt(rsaPublicKey.getPublicExponent().toByteArray().length);
                dos.write(rsaPublicKey.getPublicExponent().toByteArray());
                dos.writeInt(rsaPublicKey.getModulus().toByteArray().length);
                dos.write(rsaPublicKey.getModulus().toByteArray());
                String publicKeyEncoded = new String(Base64.getEncoder().encode(byteOs.toByteArray()), StandardCharsets.US_ASCII);
                sshPublicKey = "ssh-rsa " + publicKeyEncoded;
            } catch (NoSuchAlgorithmException | IOException e) {
                throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e));
            }
        }
        return sshPublicKey;
    }

    /**
     * Creates a randomized resource name.
     * Please provide your own implementation, or avoid using the method, if code is to be used in production.
     *
     * @param azure the AzureResourceManager instance.
     * @param prefix the prefix to the name.
     * @param maxLen the max length of the name.
     * @return the randomized resource name.
*/
public static String randomResourceName(AzureResourceManager azure, String prefix, int maxLen) {
    return azure.resourceGroups().manager().internalContext().randomResourceName(prefix, maxLen);
}

/**
 * Generates the specified number of random resource names with the same prefix.
 * Please provide your own implementation, or avoid using the method, if code is to be used in production.
 *
 * @param azure the AzureResourceManager instance.
 * @param prefix the prefix to be used if possible
 * @param maxLen the maximum length for the random generated name
 * @param count the number of names to generate
 * @return the randomized resource names.
 */
public static String[] randomResourceNames(AzureResourceManager azure, String prefix, int maxLen, int count) {
    String[] generated = new String[count];
    for (int index = 0; index < count; index++) {
        generated[index] = randomResourceName(azure, prefix, maxLen);
    }
    return generated;
}

/**
 * Creates a random UUID.
 * Please provide your own implementation, or avoid using the method, if code is to be used in production.
 *
 * @param azure the AzureResourceManager instance.
 * @return the random UUID.
 */
public static String randomUuid(AzureResourceManager azure) {
    return azure.resourceGroups().manager().internalContext().randomUuid();
}

/**
 * Creates a randomized resource name.
 * Please provide your own implementation, or avoid using the method, if code is to be used in production.
 *
 * @param authenticated the AzureResourceManager.Authenticated instance.
 * @param prefix the prefix to the name.
 * @param maxLen the max length of the name.
 * @return the randomized resource name.
 */
public static String randomResourceName(AzureResourceManager.Authenticated authenticated, String prefix, int maxLen) {
    return authenticated.roleAssignments().manager().internalContext().randomResourceName(prefix, maxLen);
}

/**
 * Print resource group info.
* * @param resource a resource group */ public static void print(ResourceGroup resource) { StringBuilder info = new StringBuilder(); info.append("Resource Group: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()); System.out.println(info.toString()); } /** * Print User Assigned MSI info. * * @param resource a User Assigned MSI */ public static void print(Identity resource) { StringBuilder info = new StringBuilder(); info.append("Resource Group: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tService Principal Id: ").append(resource.principalId()) .append("\n\tClient Id: ").append(resource.clientId()) .append("\n\tTenant Id: ").append(resource.tenantId()); System.out.println(info.toString()); } /** * Print virtual machine info. * * @param resource a virtual machine */ public static void print(VirtualMachine resource) { StringBuilder storageProfile = new StringBuilder().append("\n\tStorageProfile: "); if (resource.storageProfile().imageReference() != null) { storageProfile.append("\n\t\tImageReference:"); storageProfile.append("\n\t\t\tPublisher: ").append(resource.storageProfile().imageReference().publisher()); storageProfile.append("\n\t\t\tOffer: ").append(resource.storageProfile().imageReference().offer()); storageProfile.append("\n\t\t\tSKU: ").append(resource.storageProfile().imageReference().sku()); storageProfile.append("\n\t\t\tVersion: ").append(resource.storageProfile().imageReference().version()); } if (resource.storageProfile().osDisk() != null) { storageProfile.append("\n\t\tOSDisk:"); storageProfile.append("\n\t\t\tOSType: ").append(resource.storageProfile().osDisk().osType()); storageProfile.append("\n\t\t\tName: ").append(resource.storageProfile().osDisk().name()); storageProfile.append("\n\t\t\tCaching: 
").append(resource.storageProfile().osDisk().caching()); storageProfile.append("\n\t\t\tCreateOption: ").append(resource.storageProfile().osDisk().createOption()); storageProfile.append("\n\t\t\tDiskSizeGB: ").append(resource.storageProfile().osDisk().diskSizeGB()); if (resource.storageProfile().osDisk().managedDisk() != null) { if (resource.storageProfile().osDisk().managedDisk().diskEncryptionSet() != null) { storageProfile.append("\n\t\t\tDiskEncryptionSet Id: ") .append(resource.storageProfile().osDisk().managedDisk().diskEncryptionSet().id()); } } if (resource.storageProfile().osDisk().image() != null) { storageProfile.append("\n\t\t\tImage Uri: ").append(resource.storageProfile().osDisk().image().uri()); } if (resource.storageProfile().osDisk().vhd() != null) { storageProfile.append("\n\t\t\tVhd Uri: ").append(resource.storageProfile().osDisk().vhd().uri()); } if (resource.storageProfile().osDisk().encryptionSettings() != null) { storageProfile.append("\n\t\t\tEncryptionSettings: "); storageProfile.append("\n\t\t\t\tEnabled: ").append(resource.storageProfile().osDisk().encryptionSettings().enabled()); storageProfile.append("\n\t\t\t\tDiskEncryptionKey Uri: ").append(resource .storageProfile() .osDisk() .encryptionSettings() .diskEncryptionKey().secretUrl()); storageProfile.append("\n\t\t\t\tKeyEncryptionKey Uri: ").append(resource .storageProfile() .osDisk() .encryptionSettings() .keyEncryptionKey().keyUrl()); } } if (resource.storageProfile().dataDisks() != null) { int i = 0; for (DataDisk disk : resource.storageProfile().dataDisks()) { storageProfile.append("\n\t\tDataDisk: storageProfile.append("\n\t\t\tName: ").append(disk.name()); storageProfile.append("\n\t\t\tCaching: ").append(disk.caching()); storageProfile.append("\n\t\t\tCreateOption: ").append(disk.createOption()); storageProfile.append("\n\t\t\tDiskSizeGB: ").append(disk.diskSizeGB()); storageProfile.append("\n\t\t\tLun: ").append(disk.lun()); if (resource.isManagedDiskEnabled()) { if 
(disk.managedDisk() != null) { storageProfile.append("\n\t\t\tManaged Disk Id: ").append(disk.managedDisk().id()); if (disk.managedDisk().diskEncryptionSet() != null) { storageProfile.append("\n\t\t\tDiskEncryptionSet Id: ").append(disk.managedDisk().diskEncryptionSet().id()); } } } else { if (disk.vhd().uri() != null) { storageProfile.append("\n\t\t\tVhd Uri: ").append(disk.vhd().uri()); } } if (disk.image() != null) { storageProfile.append("\n\t\t\tImage Uri: ").append(disk.image().uri()); } } } StringBuilder osProfile = new StringBuilder().append("\n\tOSProfile: "); if (resource.osProfile() != null) { osProfile.append("\n\t\tComputerName:").append(resource.osProfile().computerName()); if (resource.osProfile().windowsConfiguration() != null) { osProfile.append("\n\t\t\tWindowsConfiguration: "); osProfile.append("\n\t\t\t\tProvisionVMAgent: ") .append(resource.osProfile().windowsConfiguration().provisionVMAgent()); osProfile.append("\n\t\t\t\tEnableAutomaticUpdates: ") .append(resource.osProfile().windowsConfiguration().enableAutomaticUpdates()); osProfile.append("\n\t\t\t\tTimeZone: ") .append(resource.osProfile().windowsConfiguration().timeZone()); } if (resource.osProfile().linuxConfiguration() != null) { osProfile.append("\n\t\t\tLinuxConfiguration: "); osProfile.append("\n\t\t\t\tDisablePasswordAuthentication: ") .append(resource.osProfile().linuxConfiguration().disablePasswordAuthentication()); } } else { osProfile.append("null"); } StringBuilder networkProfile = new StringBuilder().append("\n\tNetworkProfile: "); for (String networkInterfaceId : resource.networkInterfaceIds()) { networkProfile.append("\n\t\tId:").append(networkInterfaceId); } StringBuilder extensions = new StringBuilder().append("\n\tExtensions: "); for (Map.Entry<String, VirtualMachineExtension> extensionEntry : resource.listExtensions().entrySet()) { VirtualMachineExtension extension = extensionEntry.getValue(); extensions.append("\n\t\tExtension: ").append(extension.id()) 
.append("\n\t\t\tName: ").append(extension.name()) .append("\n\t\t\tTags: ").append(extension.tags()) .append("\n\t\t\tProvisioningState: ").append(extension.provisioningState()) .append("\n\t\t\tAuto upgrade minor version enabled: ").append(extension.autoUpgradeMinorVersionEnabled()) .append("\n\t\t\tPublisher: ").append(extension.publisherName()) .append("\n\t\t\tType: ").append(extension.typeName()) .append("\n\t\t\tVersion: ").append(extension.versionName()) .append("\n\t\t\tPublic Settings: ").append(extension.publicSettingsAsJsonString()); } StringBuilder msi = new StringBuilder().append("\n\tMSI: "); msi.append("\n\t\t\tMSI enabled:").append(resource.isManagedServiceIdentityEnabled()); msi.append("\n\t\t\tSystem Assigned MSI Active Directory Service Principal Id:").append(resource.systemAssignedManagedServiceIdentityPrincipalId()); msi.append("\n\t\t\tSystem Assigned MSI Active Directory Tenant Id:").append(resource.systemAssignedManagedServiceIdentityTenantId()); StringBuilder zones = new StringBuilder().append("\n\tZones: "); zones.append(resource.availabilityZones()); StringBuilder securityProfile = new StringBuilder().append("\n\tSecurityProfile: "); securityProfile.append("\n\t\t\tSecurity type: ").append(resource.securityType()); securityProfile.append("\n\t\t\tSecure Boot enabled: ").append(resource.isSecureBootEnabled()); securityProfile.append("\n\t\t\tvTPM enabled: ").append(resource.isVTpmEnabled()); System.out.println(new StringBuilder().append("Virtual Machine: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tHardwareProfile: ") .append("\n\t\tSize: ").append(resource.size()) .append(storageProfile) .append(osProfile) .append(networkProfile) .append(extensions) .append(msi) .append(zones) .append(securityProfile) .toString()); } /** * Print availability 
set info. * * @param resource an availability set */ public static void print(AvailabilitySet resource) { System.out.println(new StringBuilder().append("Availability Set: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tFault domain count: ").append(resource.faultDomainCount()) .append("\n\tUpdate domain count: ").append(resource.updateDomainCount()) .toString()); } /** * Print network info. * * @param resource a network * @throws ManagementException Cloud errors */ public static void print(Network resource) { StringBuilder info = new StringBuilder(); info.append("Network: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tAddress spaces: ").append(resource.addressSpaces()) .append("\n\tDNS server IPs: ").append(resource.dnsServerIPs()); for (Subnet subnet : resource.subnets().values()) { info.append("\n\tSubnet: ").append(subnet.name()) .append("\n\t\tAddress prefix: ").append(subnet.addressPrefix()); NetworkSecurityGroup subnetNsg = subnet.getNetworkSecurityGroup(); if (subnetNsg != null) { info.append("\n\t\tNetwork security group ID: ").append(subnetNsg.id()); } RouteTable routeTable = subnet.getRouteTable(); if (routeTable != null) { info.append("\n\tRoute table ID: ").append(routeTable.id()); } Map<ServiceEndpointType, List<Region>> services = subnet.servicesWithAccess(); if (services.size() > 0) { info.append("\n\tServices with access"); for (Map.Entry<ServiceEndpointType, List<Region>> service : services.entrySet()) { info.append("\n\t\tService: ") .append(service.getKey()) .append(" Regions: " + service.getValue() + ""); } } } for (NetworkPeering peering : 
resource.peerings().list()) { info.append("\n\tPeering: ").append(peering.name()) .append("\n\t\tRemote network ID: ").append(peering.remoteNetworkId()) .append("\n\t\tPeering state: ").append(peering.state()) .append("\n\t\tIs traffic forwarded from remote network allowed? ").append(peering.isTrafficForwardingFromRemoteNetworkAllowed()) .append("\n\t\tGateway use: ").append(peering.gatewayUse()); } System.out.println(info.toString()); } /** * Print network interface. * * @param resource a network interface */ public static void print(NetworkInterface resource) { StringBuilder info = new StringBuilder(); info.append("NetworkInterface: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tInternal DNS name label: ").append(resource.internalDnsNameLabel()) .append("\n\tInternal FQDN: ").append(resource.internalFqdn()) .append("\n\tInternal domain name suffix: ").append(resource.internalDomainNameSuffix()) .append("\n\tNetwork security group: ").append(resource.networkSecurityGroupId()) .append("\n\tApplied DNS servers: ").append(resource.appliedDnsServers().toString()) .append("\n\tDNS server IPs: "); for (String dnsServerIp : resource.dnsServers()) { info.append("\n\t\t").append(dnsServerIp); } info.append("\n\tIP forwarding enabled? ").append(resource.isIPForwardingEnabled()) .append("\n\tAccelerated networking enabled? 
").append(resource.isAcceleratedNetworkingEnabled()) .append("\n\tMAC Address:").append(resource.macAddress()) .append("\n\tPrivate IP:").append(resource.primaryPrivateIP()) .append("\n\tPrivate allocation method:").append(resource.primaryPrivateIpAllocationMethod()) .append("\n\tPrimary virtual network ID: ").append(resource.primaryIPConfiguration().networkId()) .append("\n\tPrimary subnet name:").append(resource.primaryIPConfiguration().subnetName()); System.out.println(info.toString()); } /** * Print network security group. * * @param resource a network security group */ public static void print(NetworkSecurityGroup resource) { StringBuilder info = new StringBuilder(); info.append("NSG: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()); for (NetworkSecurityRule rule : resource.securityRules().values()) { info.append("\n\tRule: ").append(rule.name()) .append("\n\t\tAccess: ").append(rule.access()) .append("\n\t\tDirection: ").append(rule.direction()) .append("\n\t\tFrom address: ").append(rule.sourceAddressPrefix()) .append("\n\t\tFrom port range: ").append(rule.sourcePortRange()) .append("\n\t\tTo address: ").append(rule.destinationAddressPrefix()) .append("\n\t\tTo port: ").append(rule.destinationPortRange()) .append("\n\t\tProtocol: ").append(rule.protocol()) .append("\n\t\tPriority: ").append(rule.priority()); } System.out.println(info.toString()); } /** * Print public IP address. 
* * @param resource a public IP address */ public static void print(PublicIpAddress resource) { System.out.println(new StringBuilder().append("Public IP Address: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tIP Address: ").append(resource.ipAddress()) .append("\n\tLeaf domain label: ").append(resource.leafDomainLabel()) .append("\n\tFQDN: ").append(resource.fqdn()) .append("\n\tReverse FQDN: ").append(resource.reverseFqdn()) .append("\n\tIdle timeout (minutes): ").append(resource.idleTimeoutInMinutes()) .append("\n\tIP allocation method: ").append(resource.ipAllocationMethod()) .append("\n\tZones: ").append(resource.availabilityZones()) .toString()); } /** * Print a key vault. * * @param vault the key vault resource */ public static void print(Vault vault) { StringBuilder info = new StringBuilder().append("Key Vault: ").append(vault.id()) .append("Name: ").append(vault.name()) .append("\n\tResource group: ").append(vault.resourceGroupName()) .append("\n\tRegion: ").append(vault.region()) .append("\n\tSku: ").append(vault.sku().name()).append(" - ").append(vault.sku().family()) .append("\n\tVault URI: ").append(vault.vaultUri()) .append("\n\tAccess policies: "); for (AccessPolicy accessPolicy : vault.accessPolicies()) { info.append("\n\t\tIdentity:").append(accessPolicy.objectId()); if (accessPolicy.permissions() != null) { if (accessPolicy.permissions().keys() != null) { info.append("\n\t\tKey permissions: ").append(accessPolicy.permissions().keys().stream().map(KeyPermissions::toString).collect(Collectors.joining(", "))); } if (accessPolicy.permissions().secrets() != null) { info.append("\n\t\tSecret permissions: ").append(accessPolicy.permissions().secrets().stream().map(SecretPermissions::toString).collect(Collectors.joining(", "))); } if 
(accessPolicy.permissions().certificates() != null) { info.append("\n\t\tCertificate permissions: ").append(accessPolicy.permissions().certificates().stream().map(CertificatePermissions::toString).collect(Collectors.joining(", "))); } } } System.out.println(info.toString()); } /** * Print storage account. * * @param storageAccount a storage account */ public static void print(StorageAccount storageAccount) { System.out.println(storageAccount.name() + " created @ " + storageAccount.creationTime()); StringBuilder info = new StringBuilder().append("Storage Account: ").append(storageAccount.id()) .append("Name: ").append(storageAccount.name()) .append("\n\tResource group: ").append(storageAccount.resourceGroupName()) .append("\n\tRegion: ").append(storageAccount.region()) .append("\n\tSKU: ").append(storageAccount.skuType().name().toString()) .append("\n\tAccessTier: ").append(storageAccount.accessTier()) .append("\n\tKind: ").append(storageAccount.kind()); info.append("\n\tNetwork Rule Configuration: ") .append("\n\t\tAllow reading logs from any network: ").append(storageAccount.canReadLogEntriesFromAnyNetwork()) .append("\n\t\tAllow reading metrics from any network: ").append(storageAccount.canReadMetricsFromAnyNetwork()) .append("\n\t\tAllow access from all azure services: ").append(storageAccount.canAccessFromAzureServices()); if (storageAccount.networkSubnetsWithAccess().size() > 0) { info.append("\n\t\tNetwork subnets with access: "); for (String subnetId : storageAccount.networkSubnetsWithAccess()) { info.append("\n\t\t\t").append(subnetId); } } if (storageAccount.ipAddressesWithAccess().size() > 0) { info.append("\n\t\tIP addresses with access: "); for (String ipAddress : storageAccount.ipAddressesWithAccess()) { info.append("\n\t\t\t").append(ipAddress); } } if (storageAccount.ipAddressRangesWithAccess().size() > 0) { info.append("\n\t\tIP address-ranges with access: "); for (String ipAddressRange : storageAccount.ipAddressRangesWithAccess()) { 
info.append("\n\t\t\t").append(ipAddressRange); } } info.append("\n\t\tTraffic allowed from only HTTPS: ").append(storageAccount.innerModel().enableHttpsTrafficOnly()); info.append("\n\tEncryption status: "); info.append("\n\t\tInfrastructure Encryption: ").append(storageAccount.infrastructureEncryptionEnabled() ? "Enabled" : "Disabled"); for (Map.Entry<StorageService, StorageAccountEncryptionStatus> eStatus : storageAccount.encryptionStatuses().entrySet()) { info.append("\n\t\t").append(eStatus.getValue().storageService()).append(": ").append(eStatus.getValue().isEnabled() ? "Enabled" : "Disabled"); } System.out.println(info.toString()); } /** * Print storage account keys. * * @param storageAccountKeys a list of storage account keys */ public static void print(List<StorageAccountKey> storageAccountKeys) { for (int i = 0; i < storageAccountKeys.size(); i++) { StorageAccountKey storageAccountKey = storageAccountKeys.get(i); System.out.println("Key (" + i + ") " + storageAccountKey.keyName() + "=" + storageAccountKey.value()); } } /** * Print Redis Cache. * * @param redisCache a Redis cache. 
*/ public static void print(RedisCache redisCache) { StringBuilder redisInfo = new StringBuilder() .append("Redis Cache Name: ").append(redisCache.name()) .append("\n\tResource group: ").append(redisCache.resourceGroupName()) .append("\n\tRegion: ").append(redisCache.region()) .append("\n\tSKU Name: ").append(redisCache.sku().name()) .append("\n\tSKU Family: ").append(redisCache.sku().family()) .append("\n\tHostname: ").append(redisCache.hostname()) .append("\n\tSSL port: ").append(redisCache.sslPort()) .append("\n\tNon-SSL port (6379) enabled: ").append(redisCache.nonSslPort()); if (redisCache.redisConfiguration() != null && !redisCache.redisConfiguration().isEmpty()) { redisInfo.append("\n\tRedis Configuration:"); for (Map.Entry<String, String> redisConfiguration : redisCache.redisConfiguration().entrySet()) { redisInfo.append("\n\t '").append(redisConfiguration.getKey()) .append("' : '").append(redisConfiguration.getValue()).append("'"); } } if (redisCache.isPremium()) { RedisCachePremium premium = redisCache.asPremium(); List<ScheduleEntry> scheduleEntries = premium.listPatchSchedules(); if (scheduleEntries != null && !scheduleEntries.isEmpty()) { redisInfo.append("\n\tRedis Patch Schedule:"); for (ScheduleEntry schedule : scheduleEntries) { redisInfo.append("\n\t\tDay: '").append(schedule.dayOfWeek()) .append("', start at: '").append(schedule.startHourUtc()) .append("', maintenance window: '").append(schedule.maintenanceWindow()) .append("'"); } } } System.out.println(redisInfo.toString()); } /** * Print Redis Cache access keys. * * @param redisAccessKeys a keys for Redis Cache */ public static void print(RedisAccessKeys redisAccessKeys) { StringBuilder redisKeys = new StringBuilder() .append("Redis Access Keys: ") .append("\n\tPrimary Key: '").append(redisAccessKeys.primaryKey()).append("', ") .append("\n\tSecondary Key: '").append(redisAccessKeys.secondaryKey()).append("', "); System.out.println(redisKeys.toString()); } /** * Print management lock. 
* * @param lock a management lock */ public static void print(ManagementLock lock) { StringBuilder info = new StringBuilder(); info.append("\nLock ID: ").append(lock.id()) .append("\nLocked resource ID: ").append(lock.lockedResourceId()) .append("\nLevel: ").append(lock.level()); System.out.println(info.toString()); } /** * Print load balancer. * * @param resource a load balancer */ public static void print(LoadBalancer resource) { StringBuilder info = new StringBuilder(); info.append("Load balancer: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tBackends: ").append(resource.backends().keySet().toString()); info.append("\n\tPublic IP address IDs: ") .append(resource.publicIpAddressIds().size()); for (String pipId : resource.publicIpAddressIds()) { info.append("\n\t\tPIP id: ").append(pipId); } info.append("\n\tTCP probes: ") .append(resource.tcpProbes().size()); for (LoadBalancerTcpProbe probe : resource.tcpProbes().values()) { info.append("\n\t\tProbe name: ").append(probe.name()) .append("\n\t\t\tPort: ").append(probe.port()) .append("\n\t\t\tInterval in seconds: ").append(probe.intervalInSeconds()) .append("\n\t\t\tRetries before unhealthy: ").append(probe.numberOfProbes()); info.append("\n\t\t\tReferenced from load balancing rules: ") .append(probe.loadBalancingRules().size()); for (LoadBalancingRule rule : probe.loadBalancingRules().values()) { info.append("\n\t\t\t\tName: ").append(rule.name()); } } info.append("\n\tHTTP probes: ") .append(resource.httpProbes().size()); for (LoadBalancerHttpProbe probe : resource.httpProbes().values()) { info.append("\n\t\tProbe name: ").append(probe.name()) .append("\n\t\t\tPort: ").append(probe.port()) .append("\n\t\t\tInterval in seconds: ").append(probe.intervalInSeconds()) .append("\n\t\t\tRetries before unhealthy: 
").append(probe.numberOfProbes()) .append("\n\t\t\tHTTP request path: ").append(probe.requestPath()); info.append("\n\t\t\tReferenced from load balancing rules: ") .append(probe.loadBalancingRules().size()); for (LoadBalancingRule rule : probe.loadBalancingRules().values()) { info.append("\n\t\t\t\tName: ").append(rule.name()); } } info.append("\n\tHTTPS probes: ") .append(resource.httpsProbes().size()); for (LoadBalancerHttpProbe probe : resource.httpsProbes().values()) { info.append("\n\t\tProbe name: ").append(probe.name()) .append("\n\t\t\tPort: ").append(probe.port()) .append("\n\t\t\tInterval in seconds: ").append(probe.intervalInSeconds()) .append("\n\t\t\tRetries before unhealthy: ").append(probe.numberOfProbes()) .append("\n\t\t\tHTTPS request path: ").append(probe.requestPath()); info.append("\n\t\t\tReferenced from load balancing rules: ") .append(probe.loadBalancingRules().size()); for (LoadBalancingRule rule : probe.loadBalancingRules().values()) { info.append("\n\t\t\t\tName: ").append(rule.name()); } } info.append("\n\tLoad balancing rules: ") .append(resource.loadBalancingRules().size()); for (LoadBalancingRule rule : resource.loadBalancingRules().values()) { info.append("\n\t\tLB rule name: ").append(rule.name()) .append("\n\t\t\tProtocol: ").append(rule.protocol()) .append("\n\t\t\tFloating IP enabled? 
").append(rule.floatingIPEnabled()) .append("\n\t\t\tIdle timeout in minutes: ").append(rule.idleTimeoutInMinutes()) .append("\n\t\t\tLoad distribution method: ").append(rule.loadDistribution().toString()); LoadBalancerFrontend frontend = rule.frontend(); info.append("\n\t\t\tFrontend: "); if (frontend != null) { info.append(frontend.name()); } else { info.append("(None)"); } info.append("\n\t\t\tFrontend port: ").append(rule.frontendPort()); LoadBalancerBackend backend = rule.backend(); info.append("\n\t\t\tBackend: "); if (backend != null) { info.append(backend.name()); } else { info.append("(None)"); } info.append("\n\t\t\tBackend port: ").append(rule.backendPort()); LoadBalancerProbe probe = rule.probe(); info.append("\n\t\t\tProbe: "); if (probe == null) { info.append("(None)"); } else { info.append(probe.name()).append(" [").append(probe.protocol().toString()).append("]"); } } info.append("\n\tFrontends: ") .append(resource.frontends().size()); for (LoadBalancerFrontend frontend : resource.frontends().values()) { info.append("\n\t\tFrontend name: ").append(frontend.name()) .append("\n\t\t\tInternet facing: ").append(frontend.isPublic()); if (frontend.isPublic()) { info.append("\n\t\t\tPublic IP Address ID: ").append(((LoadBalancerPublicFrontend) frontend).publicIpAddressId()); } else { info.append("\n\t\t\tVirtual network ID: ").append(((LoadBalancerPrivateFrontend) frontend).networkId()) .append("\n\t\t\tSubnet name: ").append(((LoadBalancerPrivateFrontend) frontend).subnetName()) .append("\n\t\t\tPrivate IP address: ").append(((LoadBalancerPrivateFrontend) frontend).privateIpAddress()) .append("\n\t\t\tPrivate IP allocation method: ").append(((LoadBalancerPrivateFrontend) frontend).privateIpAllocationMethod()); } info.append("\n\t\t\tReferenced inbound NAT pools: ") .append(frontend.inboundNatPools().size()); for (LoadBalancerInboundNatPool pool : frontend.inboundNatPools().values()) { info.append("\n\t\t\t\tName: ").append(pool.name()); } 
info.append("\n\t\t\tReferenced inbound NAT rules: ") .append(frontend.inboundNatRules().size()); for (LoadBalancerInboundNatRule rule : frontend.inboundNatRules().values()) { info.append("\n\t\t\t\tName: ").append(rule.name()); } info.append("\n\t\t\tReferenced load balancing rules: ") .append(frontend.loadBalancingRules().size()); for (LoadBalancingRule rule : frontend.loadBalancingRules().values()) { info.append("\n\t\t\t\tName: ").append(rule.name()); } } info.append("\n\tInbound NAT rules: ") .append(resource.inboundNatRules().size()); for (LoadBalancerInboundNatRule natRule : resource.inboundNatRules().values()) { info.append("\n\t\tInbound NAT rule name: ").append(natRule.name()) .append("\n\t\t\tProtocol: ").append(natRule.protocol().toString()) .append("\n\t\t\tFrontend: ").append(natRule.frontend().name()) .append("\n\t\t\tFrontend port: ").append(natRule.frontendPort()) .append("\n\t\t\tBackend port: ").append(natRule.backendPort()) .append("\n\t\t\tBackend NIC ID: ").append(natRule.backendNetworkInterfaceId()) .append("\n\t\t\tBackend NIC IP config name: ").append(natRule.backendNicIpConfigurationName()) .append("\n\t\t\tFloating IP? 
").append(natRule.floatingIPEnabled()) .append("\n\t\t\tIdle timeout in minutes: ").append(natRule.idleTimeoutInMinutes()); } info.append("\n\tInbound NAT pools: ") .append(resource.inboundNatPools().size()); for (LoadBalancerInboundNatPool natPool : resource.inboundNatPools().values()) { info.append("\n\t\tInbound NAT pool name: ").append(natPool.name()) .append("\n\t\t\tProtocol: ").append(natPool.protocol().toString()) .append("\n\t\t\tFrontend: ").append(natPool.frontend().name()) .append("\n\t\t\tFrontend port range: ") .append(natPool.frontendPortRangeStart()) .append("-") .append(natPool.frontendPortRangeEnd()) .append("\n\t\t\tBackend port: ").append(natPool.backendPort()); } info.append("\n\tBackends: ") .append(resource.backends().size()); for (LoadBalancerBackend backend : resource.backends().values()) { info.append("\n\t\tBackend name: ").append(backend.name()); info.append("\n\t\t\tReferenced NICs: ") .append(backend.backendNicIPConfigurationNames().entrySet().size()); for (Map.Entry<String, String> entry : backend.backendNicIPConfigurationNames().entrySet()) { info.append("\n\t\t\t\tNIC ID: ").append(entry.getKey()) .append(" - IP Config: ").append(entry.getValue()); } Set<String> vmIds = backend.getVirtualMachineIds(); info.append("\n\t\t\tReferenced virtual machine ids: ") .append(vmIds.size()); for (String vmId : vmIds) { info.append("\n\t\t\t\tVM ID: ").append(vmId); } info.append("\n\t\t\tReferenced load balancing rules: ") .append(new ArrayList<String>(backend.loadBalancingRules().keySet())); } System.out.println(info.toString()); } /** * Print app service domain. 
* * @param resource an app service domain */ public static void print(AppServiceDomain resource) { StringBuilder builder = new StringBuilder().append("Domain: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tCreated time: ").append(resource.createdTime()) .append("\n\tExpiration time: ").append(resource.expirationTime()) .append("\n\tContact: "); Contact contact = resource.registrantContact(); if (contact == null) { builder = builder.append("Private"); } else { builder = builder.append("\n\t\tName: ").append(contact.nameFirst() + " " + contact.nameLast()); } builder = builder.append("\n\tName servers: "); for (String nameServer : resource.nameServers()) { builder = builder.append("\n\t\t" + nameServer); } System.out.println(builder.toString()); } /** * Print app service certificate order. * * @param resource an app service certificate order */ public static void print(AppServiceCertificateOrder resource) { StringBuilder builder = new StringBuilder().append("App service certificate order: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tDistinguished name: ").append(resource.distinguishedName()) .append("\n\tProduct type: ").append(resource.productType()) .append("\n\tValid years: ").append(resource.validityInYears()) .append("\n\tStatus: ").append(resource.status()) .append("\n\tIssuance time: ").append(resource.lastCertificateIssuanceTime()) .append("\n\tSigned certificate: ").append(resource.signedCertificate() == null ? null : resource.signedCertificate().thumbprint()); System.out.println(builder.toString()); } /** * Print app service plan. 
* * @param resource an app service plan */ public static void print(AppServicePlan resource) { StringBuilder builder = new StringBuilder().append("App service certificate order: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tPricing tier: ").append(resource.pricingTier()); System.out.println(builder.toString()); } /** * Print a web app. * * @param resource a web app */ public static void print(WebAppBase resource) { StringBuilder builder = new StringBuilder().append("Web app: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tState: ").append(resource.state()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tDefault hostname: ").append(resource.defaultHostname()) .append("\n\tApp service plan: ").append(resource.appServicePlanId()) .append("\n\tHost name bindings: "); for (HostnameBinding binding : resource.getHostnameBindings().values()) { builder = builder.append("\n\t\t" + binding.toString()); } builder = builder.append("\n\tSSL bindings: "); for (HostnameSslState binding : resource.hostnameSslStates().values()) { builder = builder.append("\n\t\t" + binding.name() + ": " + binding.sslState()); if (binding.sslState() != null && binding.sslState() != SslState.DISABLED) { builder = builder.append(" - " + binding.thumbprint()); } } builder = builder.append("\n\tApp settings: "); for (AppSetting setting : resource.getAppSettings().values()) { builder = builder.append("\n\t\t" + setting.key() + ": " + setting.value() + (setting.sticky() ? " - slot setting" : "")); } builder = builder.append("\n\tConnection strings: "); for (ConnectionString conn : resource.getConnectionStrings().values()) { builder = builder.append("\n\t\t" + conn.name() + ": " + conn.value() + " - " + conn.type() + (conn.sticky() ? 
" - slot setting" : "")); } System.out.println(builder.toString()); } /** * Print a web site. * * @param resource a web site */ public static void print(WebSiteBase resource) { StringBuilder builder = new StringBuilder().append("Web app: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tState: ").append(resource.state()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tDefault hostname: ").append(resource.defaultHostname()) .append("\n\tApp service plan: ").append(resource.appServicePlanId()); builder = builder.append("\n\tSSL bindings: "); for (HostnameSslState binding : resource.hostnameSslStates().values()) { builder = builder.append("\n\t\t" + binding.name() + ": " + binding.sslState()); if (binding.sslState() != null && binding.sslState() != SslState.DISABLED) { builder = builder.append(" - " + binding.thumbprint()); } } System.out.println(builder.toString()); } /** * Print a traffic manager profile. 
* * @param profile a traffic manager profile */ public static void print(TrafficManagerProfile profile) { StringBuilder info = new StringBuilder(); info.append("Traffic Manager Profile: ").append(profile.id()) .append("\n\tName: ").append(profile.name()) .append("\n\tResource group: ").append(profile.resourceGroupName()) .append("\n\tRegion: ").append(profile.regionName()) .append("\n\tTags: ").append(profile.tags()) .append("\n\tDNSLabel: ").append(profile.dnsLabel()) .append("\n\tFQDN: ").append(profile.fqdn()) .append("\n\tTTL: ").append(profile.timeToLive()) .append("\n\tEnabled: ").append(profile.isEnabled()) .append("\n\tRoutingMethod: ").append(profile.trafficRoutingMethod()) .append("\n\tMonitor status: ").append(profile.monitorStatus()) .append("\n\tMonitoring port: ").append(profile.monitoringPort()) .append("\n\tMonitoring path: ").append(profile.monitoringPath()); Map<String, TrafficManagerAzureEndpoint> azureEndpoints = profile.azureEndpoints(); if (!azureEndpoints.isEmpty()) { info.append("\n\tAzure endpoints:"); int idx = 1; for (TrafficManagerAzureEndpoint endpoint : azureEndpoints.values()) { info.append("\n\t\tAzure endpoint: .append("\n\t\t\tId: ").append(endpoint.id()) .append("\n\t\t\tType: ").append(endpoint.endpointType()) .append("\n\t\t\tTarget resourceId: ").append(endpoint.targetAzureResourceId()) .append("\n\t\t\tTarget resourceType: ").append(endpoint.targetResourceType()) .append("\n\t\t\tMonitor status: ").append(endpoint.monitorStatus()) .append("\n\t\t\tEnabled: ").append(endpoint.isEnabled()) .append("\n\t\t\tRouting priority: ").append(endpoint.routingPriority()) .append("\n\t\t\tRouting weight: ").append(endpoint.routingWeight()); } } Map<String, TrafficManagerExternalEndpoint> externalEndpoints = profile.externalEndpoints(); if (!externalEndpoints.isEmpty()) { info.append("\n\tExternal endpoints:"); int idx = 1; for (TrafficManagerExternalEndpoint endpoint : externalEndpoints.values()) { info.append("\n\t\tExternal endpoint: 
.append("\n\t\t\tId: ").append(endpoint.id()) .append("\n\t\t\tType: ").append(endpoint.endpointType()) .append("\n\t\t\tFQDN: ").append(endpoint.fqdn()) .append("\n\t\t\tSource Traffic Location: ").append(endpoint.sourceTrafficLocation()) .append("\n\t\t\tMonitor status: ").append(endpoint.monitorStatus()) .append("\n\t\t\tEnabled: ").append(endpoint.isEnabled()) .append("\n\t\t\tRouting priority: ").append(endpoint.routingPriority()) .append("\n\t\t\tRouting weight: ").append(endpoint.routingWeight()); } } Map<String, TrafficManagerNestedProfileEndpoint> nestedProfileEndpoints = profile.nestedProfileEndpoints(); if (!nestedProfileEndpoints.isEmpty()) { info.append("\n\tNested profile endpoints:"); int idx = 1; for (TrafficManagerNestedProfileEndpoint endpoint : nestedProfileEndpoints.values()) { info.append("\n\t\tNested profile endpoint: .append("\n\t\t\tId: ").append(endpoint.id()) .append("\n\t\t\tType: ").append(endpoint.endpointType()) .append("\n\t\t\tNested profileId: ").append(endpoint.nestedProfileId()) .append("\n\t\t\tMinimum child threshold: ").append(endpoint.minimumChildEndpointCount()) .append("\n\t\t\tSource Traffic Location: ").append(endpoint.sourceTrafficLocation()) .append("\n\t\t\tMonitor status: ").append(endpoint.monitorStatus()) .append("\n\t\t\tEnabled: ").append(endpoint.isEnabled()) .append("\n\t\t\tRouting priority: ").append(endpoint.routingPriority()) .append("\n\t\t\tRouting weight: ").append(endpoint.routingWeight()); } } System.out.println(info.toString()); } /** * Print a dns zone. 
* * @param dnsZone a dns zone */ public static void print(DnsZone dnsZone) { StringBuilder info = new StringBuilder(); info.append("DNS Zone: ").append(dnsZone.id()) .append("\n\tName (Top level domain): ").append(dnsZone.name()) .append("\n\tResource group: ").append(dnsZone.resourceGroupName()) .append("\n\tRegion: ").append(dnsZone.regionName()) .append("\n\tTags: ").append(dnsZone.tags()) .append("\n\tName servers:"); for (String nameServer : dnsZone.nameServers()) { info.append("\n\t\t").append(nameServer); } SoaRecordSet soaRecordSet = dnsZone.getSoaRecordSet(); SoaRecord soaRecord = soaRecordSet.record(); info.append("\n\tSOA Record:") .append("\n\t\tHost:").append(soaRecord.host()) .append("\n\t\tEmail:").append(soaRecord.email()) .append("\n\t\tExpire time (seconds):").append(soaRecord.expireTime()) .append("\n\t\tRefresh time (seconds):").append(soaRecord.refreshTime()) .append("\n\t\tRetry time (seconds):").append(soaRecord.retryTime()) .append("\n\t\tNegative response cache ttl (seconds):").append(soaRecord.minimumTtl()) .append("\n\t\tTTL (seconds):").append(soaRecordSet.timeToLive()); PagedIterable<ARecordSet> aRecordSets = dnsZone.aRecordSets().list(); info.append("\n\tA Record sets:"); for (ARecordSet aRecordSet : aRecordSets) { info.append("\n\t\tId: ").append(aRecordSet.id()) .append("\n\t\tName: ").append(aRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(aRecordSet.timeToLive()) .append("\n\t\tIP v4 addresses: "); for (String ipAddress : aRecordSet.ipv4Addresses()) { info.append("\n\t\t\t").append(ipAddress); } } PagedIterable<AaaaRecordSet> aaaaRecordSets = dnsZone.aaaaRecordSets().list(); info.append("\n\tAAAA Record sets:"); for (AaaaRecordSet aaaaRecordSet : aaaaRecordSets) { info.append("\n\t\tId: ").append(aaaaRecordSet.id()) .append("\n\t\tName: ").append(aaaaRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(aaaaRecordSet.timeToLive()) .append("\n\t\tIP v6 addresses: "); for (String ipAddress : 
aaaaRecordSet.ipv6Addresses()) { info.append("\n\t\t\t").append(ipAddress); } } PagedIterable<CnameRecordSet> cnameRecordSets = dnsZone.cNameRecordSets().list(); info.append("\n\tCNAME Record sets:"); for (CnameRecordSet cnameRecordSet : cnameRecordSets) { info.append("\n\t\tId: ").append(cnameRecordSet.id()) .append("\n\t\tName: ").append(cnameRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(cnameRecordSet.timeToLive()) .append("\n\t\tCanonical name: ").append(cnameRecordSet.canonicalName()); } PagedIterable<MxRecordSet> mxRecordSets = dnsZone.mxRecordSets().list(); info.append("\n\tMX Record sets:"); for (MxRecordSet mxRecordSet : mxRecordSets) { info.append("\n\t\tId: ").append(mxRecordSet.id()) .append("\n\t\tName: ").append(mxRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(mxRecordSet.timeToLive()) .append("\n\t\tRecords: "); for (MxRecord mxRecord : mxRecordSet.records()) { info.append("\n\t\t\tExchange server, Preference: ") .append(mxRecord.exchange()) .append(" ") .append(mxRecord.preference()); } } PagedIterable<NsRecordSet> nsRecordSets = dnsZone.nsRecordSets().list(); info.append("\n\tNS Record sets:"); for (NsRecordSet nsRecordSet : nsRecordSets) { info.append("\n\t\tId: ").append(nsRecordSet.id()) .append("\n\t\tName: ").append(nsRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(nsRecordSet.timeToLive()) .append("\n\t\tName servers: "); for (String nameServer : nsRecordSet.nameServers()) { info.append("\n\t\t\t").append(nameServer); } } PagedIterable<PtrRecordSet> ptrRecordSets = dnsZone.ptrRecordSets().list(); info.append("\n\tPTR Record sets:"); for (PtrRecordSet ptrRecordSet : ptrRecordSets) { info.append("\n\t\tId: ").append(ptrRecordSet.id()) .append("\n\t\tName: ").append(ptrRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(ptrRecordSet.timeToLive()) .append("\n\t\tTarget domain names: "); for (String domainNames : ptrRecordSet.targetDomainNames()) { info.append("\n\t\t\t").append(domainNames); } } 
PagedIterable<SrvRecordSet> srvRecordSets = dnsZone.srvRecordSets().list(); info.append("\n\tSRV Record sets:"); for (SrvRecordSet srvRecordSet : srvRecordSets) { info.append("\n\t\tId: ").append(srvRecordSet.id()) .append("\n\t\tName: ").append(srvRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(srvRecordSet.timeToLive()) .append("\n\t\tRecords: "); for (SrvRecord srvRecord : srvRecordSet.records()) { info.append("\n\t\t\tTarget, Port, Priority, Weight: ") .append(srvRecord.target()) .append(", ") .append(srvRecord.port()) .append(", ") .append(srvRecord.priority()) .append(", ") .append(srvRecord.weight()); } } PagedIterable<TxtRecordSet> txtRecordSets = dnsZone.txtRecordSets().list(); info.append("\n\tTXT Record sets:"); for (TxtRecordSet txtRecordSet : txtRecordSets) { info.append("\n\t\tId: ").append(txtRecordSet.id()) .append("\n\t\tName: ").append(txtRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(txtRecordSet.timeToLive()) .append("\n\t\tRecords: "); for (TxtRecord txtRecord : txtRecordSet.records()) { if (txtRecord.value().size() > 0) { info.append("\n\t\t\tValue: ").append(txtRecord.value().get(0)); } } } System.out.println(info.toString()); } /** * Print a private dns zone. 
* * @param privateDnsZone a private dns zone */ public static void print(PrivateDnsZone privateDnsZone) { StringBuilder info = new StringBuilder(); info.append("Private DNS Zone: ").append(privateDnsZone.id()) .append("\n\tName (Top level domain): ").append(privateDnsZone.name()) .append("\n\tResource group: ").append(privateDnsZone.resourceGroupName()) .append("\n\tRegion: ").append(privateDnsZone.regionName()) .append("\n\tTags: ").append(privateDnsZone.tags()) .append("\n\tName servers:"); com.azure.resourcemanager.privatedns.models.SoaRecordSet soaRecordSet = privateDnsZone.getSoaRecordSet(); com.azure.resourcemanager.privatedns.models.SoaRecord soaRecord = soaRecordSet.record(); info.append("\n\tSOA Record:") .append("\n\t\tHost:").append(soaRecord.host()) .append("\n\t\tEmail:").append(soaRecord.email()) .append("\n\t\tExpire time (seconds):").append(soaRecord.expireTime()) .append("\n\t\tRefresh time (seconds):").append(soaRecord.refreshTime()) .append("\n\t\tRetry time (seconds):").append(soaRecord.retryTime()) .append("\n\t\tNegative response cache ttl (seconds):").append(soaRecord.minimumTtl()) .append("\n\t\tTTL (seconds):").append(soaRecordSet.timeToLive()); PagedIterable<com.azure.resourcemanager.privatedns.models.ARecordSet> aRecordSets = privateDnsZone .aRecordSets().list(); info.append("\n\tA Record sets:"); for (com.azure.resourcemanager.privatedns.models.ARecordSet aRecordSet : aRecordSets) { info.append("\n\t\tId: ").append(aRecordSet.id()) .append("\n\t\tName: ").append(aRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(aRecordSet.timeToLive()) .append("\n\t\tIP v4 addresses: "); for (String ipAddress : aRecordSet.ipv4Addresses()) { info.append("\n\t\t\t").append(ipAddress); } } PagedIterable<com.azure.resourcemanager.privatedns.models.AaaaRecordSet> aaaaRecordSets = privateDnsZone .aaaaRecordSets().list(); info.append("\n\tAAAA Record sets:"); for (com.azure.resourcemanager.privatedns.models.AaaaRecordSet aaaaRecordSet : aaaaRecordSets) 
{ info.append("\n\t\tId: ").append(aaaaRecordSet.id()) .append("\n\t\tName: ").append(aaaaRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(aaaaRecordSet.timeToLive()) .append("\n\t\tIP v6 addresses: "); for (String ipAddress : aaaaRecordSet.ipv6Addresses()) { info.append("\n\t\t\t").append(ipAddress); } } PagedIterable<com.azure.resourcemanager.privatedns.models.CnameRecordSet> cnameRecordSets = privateDnsZone.cnameRecordSets().list(); info.append("\n\tCNAME Record sets:"); for (com.azure.resourcemanager.privatedns.models.CnameRecordSet cnameRecordSet : cnameRecordSets) { info.append("\n\t\tId: ").append(cnameRecordSet.id()) .append("\n\t\tName: ").append(cnameRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(cnameRecordSet.timeToLive()) .append("\n\t\tCanonical name: ").append(cnameRecordSet.canonicalName()); } PagedIterable<com.azure.resourcemanager.privatedns.models.MxRecordSet> mxRecordSets = privateDnsZone.mxRecordSets().list(); info.append("\n\tMX Record sets:"); for (com.azure.resourcemanager.privatedns.models.MxRecordSet mxRecordSet : mxRecordSets) { info.append("\n\t\tId: ").append(mxRecordSet.id()) .append("\n\t\tName: ").append(mxRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(mxRecordSet.timeToLive()) .append("\n\t\tRecords: "); for (com.azure.resourcemanager.privatedns.models.MxRecord mxRecord : mxRecordSet.records()) { info.append("\n\t\t\tExchange server, Preference: ") .append(mxRecord.exchange()) .append(" ") .append(mxRecord.preference()); } } PagedIterable<com.azure.resourcemanager.privatedns.models.PtrRecordSet> ptrRecordSets = privateDnsZone .ptrRecordSets().list(); info.append("\n\tPTR Record sets:"); for (com.azure.resourcemanager.privatedns.models.PtrRecordSet ptrRecordSet : ptrRecordSets) { info.append("\n\t\tId: ").append(ptrRecordSet.id()) .append("\n\t\tName: ").append(ptrRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(ptrRecordSet.timeToLive()) .append("\n\t\tTarget domain names: "); for (String 
domainNames : ptrRecordSet.targetDomainNames()) { info.append("\n\t\t\t").append(domainNames); } } PagedIterable<com.azure.resourcemanager.privatedns.models.SrvRecordSet> srvRecordSets = privateDnsZone .srvRecordSets().list(); info.append("\n\tSRV Record sets:"); for (com.azure.resourcemanager.privatedns.models.SrvRecordSet srvRecordSet : srvRecordSets) { info.append("\n\t\tId: ").append(srvRecordSet.id()) .append("\n\t\tName: ").append(srvRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(srvRecordSet.timeToLive()) .append("\n\t\tRecords: "); for (com.azure.resourcemanager.privatedns.models.SrvRecord srvRecord : srvRecordSet.records()) { info.append("\n\t\t\tTarget, Port, Priority, Weight: ") .append(srvRecord.target()) .append(", ") .append(srvRecord.port()) .append(", ") .append(srvRecord.priority()) .append(", ") .append(srvRecord.weight()); } } PagedIterable<com.azure.resourcemanager.privatedns.models.TxtRecordSet> txtRecordSets = privateDnsZone .txtRecordSets().list(); info.append("\n\tTXT Record sets:"); for (com.azure.resourcemanager.privatedns.models.TxtRecordSet txtRecordSet : txtRecordSets) { info.append("\n\t\tId: ").append(txtRecordSet.id()) .append("\n\t\tName: ").append(txtRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(txtRecordSet.timeToLive()) .append("\n\t\tRecords: "); for (com.azure.resourcemanager.privatedns.models.TxtRecord txtRecord : txtRecordSet.records()) { if (txtRecord.value().size() > 0) { info.append("\n\t\t\tValue: ").append(txtRecord.value().get(0)); } } } PagedIterable<VirtualNetworkLink> virtualNetworkLinks = privateDnsZone.virtualNetworkLinks().list(); info.append("\n\tVirtual Network Links:"); for (VirtualNetworkLink virtualNetworkLink : virtualNetworkLinks) { info.append("\n\tId: ").append(virtualNetworkLink.id()) .append("\n\tName: ").append(virtualNetworkLink.name()) .append("\n\tReference of Virtual Network: ").append(virtualNetworkLink.referencedVirtualNetworkId()) .append("\n\tRegistration enabled: 
").append(virtualNetworkLink.isAutoRegistrationEnabled()); } System.out.println(info.toString()); } /** * Print an Azure Container Registry. * * @param azureRegistry an Azure Container Registry */ public static void print(Registry azureRegistry) { StringBuilder info = new StringBuilder(); RegistryCredentials acrCredentials = azureRegistry.getCredentials(); info.append("Azure Container Registry: ").append(azureRegistry.id()) .append("\n\tName: ").append(azureRegistry.name()) .append("\n\tServer Url: ").append(azureRegistry.loginServerUrl()) .append("\n\tUser: ").append(acrCredentials.username()) .append("\n\tFirst Password: ").append(acrCredentials.accessKeys().get(AccessKeyType.PRIMARY)) .append("\n\tSecond Password: ").append(acrCredentials.accessKeys().get(AccessKeyType.SECONDARY)); System.out.println(info.toString()); } /** * Print an Azure Container Service (AKS). * * @param kubernetesCluster a managed container service */ public static void print(KubernetesCluster kubernetesCluster) { StringBuilder info = new StringBuilder(); info.append("Azure Container Service: ").append(kubernetesCluster.id()) .append("\n\tName: ").append(kubernetesCluster.name()) .append("\n\tFQDN: ").append(kubernetesCluster.fqdn()) .append("\n\tDNS prefix label: ").append(kubernetesCluster.dnsPrefix()) .append("\n\t\tWith Agent pool name: ").append(new ArrayList<>(kubernetesCluster.agentPools().keySet()).get(0)) .append("\n\t\tAgent pool count: ").append(new ArrayList<>(kubernetesCluster.agentPools().values()).get(0).count()) .append("\n\t\tAgent pool VM size: ").append(new ArrayList<>(kubernetesCluster.agentPools().values()).get(0).vmSize().toString()) .append("\n\tLinux user name: ").append(kubernetesCluster.linuxRootUsername()) .append("\n\tSSH key: ").append(kubernetesCluster.sshKey()) .append("\n\tService principal client ID: ").append(kubernetesCluster.servicePrincipalClientId()); System.out.println(info.toString()); } /** * Print an Azure Search Service. 
 *
 * @param searchService an Azure Search Service
 */
public static void print(SearchService searchService) {
    StringBuilder info = new StringBuilder();
    // Admin keys and query keys require separate service calls.
    AdminKeys adminKeys = searchService.getAdminKeys();
    PagedIterable<QueryKey> queryKeys = searchService.listQueryKeys();
    info.append("Azure Search: ").append(searchService.id())
        .append("\n\tResource group: ").append(searchService.resourceGroupName())
        .append("\n\tRegion: ").append(searchService.region())
        .append("\n\tTags: ").append(searchService.tags())
        .append("\n\tSku: ").append(searchService.sku().name())
        .append("\n\tStatus: ").append(searchService.status())
        .append("\n\tProvisioning State: ").append(searchService.provisioningState())
        .append("\n\tHosting Mode: ").append(searchService.hostingMode())
        .append("\n\tReplicas: ").append(searchService.replicaCount())
        .append("\n\tPartitions: ").append(searchService.partitionCount())
        // Keys are printed — sample code only; never log keys in production.
        .append("\n\tPrimary Admin Key: ").append(adminKeys.primaryKey())
        .append("\n\tSecondary Admin Key: ").append(adminKeys.secondaryKey())
        .append("\n\tQuery keys:");
    for (QueryKey queryKey : queryKeys) {
        info.append("\n\t\tKey name: ").append(queryKey.name());
        info.append("\n\t\t Value: ").append(queryKey.key());
    }
    System.out.println(info.toString());
}

/**
 * Retrieve the secondary service principal client ID.
 *
 * @param envSecondaryServicePrincipal the secondary service principal supplied via the environment
 *     — NOTE(review): the original javadoc said "an Azure Container Registry", which looks like a
 *     copy-paste error; confirm against the method declaration.
 * @return a service principal client ID
 * @throws IOException exception
 */
class Utils {
    private static final ClientLogger LOGGER = new ClientLogger(Utils.class);

    // Lazily generated, cached public key string ("ssh-rsa <base64>").
    private static String sshPublicKey;

    private Utils() {
    }

    /** @return a generated password */
    public static String password() {
        // Random 12-character value; the "Pa5$" prefix satisfies common complexity rules.
        String generated = new ResourceManagerUtils.InternalRuntimeContext().randomResourceName("Pa5$", 12);
        System.out.printf("Password: %s%n", generated);
        return generated;
    }

    /**
     * @return an SSH public key
     */
    public static String sshPublicKey() {
        if (sshPublicKey == null) {
            try {
                KeyPairGenerator generator = KeyPairGenerator.getInstance("RSA");
                generator.initialize(1024);
                KeyPair pair = generator.generateKeyPair();
                RSAPublicKey rsaKey = (RSAPublicKey) pair.getPublic();

                // Encode in the OpenSSH wire format: length-prefixed algorithm name,
                // public exponent, then modulus.
                ByteArrayOutputStream buffer = new ByteArrayOutputStream();
                DataOutputStream out = new DataOutputStream(buffer);
                byte[] algorithm = "ssh-rsa".getBytes(StandardCharsets.US_ASCII);
                out.writeInt(algorithm.length);
                out.write(algorithm);
                byte[] exponent = rsaKey.getPublicExponent().toByteArray();
                out.writeInt(exponent.length);
                out.write(exponent);
                byte[] modulus = rsaKey.getModulus().toByteArray();
                out.writeInt(modulus.length);
                out.write(modulus);

                String encoded = new String(Base64.getEncoder().encode(buffer.toByteArray()), StandardCharsets.US_ASCII);
                sshPublicKey = "ssh-rsa " + encoded;
            } catch (NoSuchAlgorithmException | IOException e) {
                throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e));
            }
        }
        return sshPublicKey;
    }

    /**
     * Creates a randomized resource name.
     * Please provide your own implementation, or avoid using the method, if code is to be used in production.
     *
     * @param azure the AzureResourceManager instance.
     * @param prefix the prefix to the name.
     * @param maxLen the max length of the name.
     * @return the randomized resource name.
*/
    public static String randomResourceName(AzureResourceManager azure, String prefix, int maxLen) {
        return azure.resourceGroups().manager().internalContext().randomResourceName(prefix, maxLen);
    }

    /**
     * Generates the specified number of random resource names with the same prefix.
     * Please provide your own implementation, or avoid using the method, if code is to be used in production.
     *
     * @param azure the AzureResourceManager instance.
     * @param prefix the prefix to be used if possible
     * @param maxLen the maximum length for the random generated name
     * @param count the number of names to generate
     * @return the randomized resource names.
     */
    public static String[] randomResourceNames(AzureResourceManager azure, String prefix, int maxLen, int count) {
        String[] result = new String[count];
        for (int index = 0; index < count; index++) {
            result[index] = randomResourceName(azure, prefix, maxLen);
        }
        return result;
    }

    /**
     * Creates a random UUID.
     * Please provide your own implementation, or avoid using the method, if code is to be used in production.
     *
     * @param azure the AzureResourceManager instance.
     * @return the random UUID.
     */
    public static String randomUuid(AzureResourceManager azure) {
        return azure.resourceGroups().manager().internalContext().randomUuid();
    }

    /**
     * Creates a randomized resource name.
     * Please provide your own implementation, or avoid using the method, if code is to be used in production.
     *
     * @param authenticated the AzureResourceManager.Authenticated instance.
     * @param prefix the prefix to the name.
     * @param maxLen the max length of the name.
     * @return the randomized resource name.
     */
    public static String randomResourceName(AzureResourceManager.Authenticated authenticated, String prefix, int maxLen) {
        return authenticated.roleAssignments().manager().internalContext().randomResourceName(prefix, maxLen);
    }

    /**
     * Print resource group info.
* * @param resource a resource group */ public static void print(ResourceGroup resource) { StringBuilder info = new StringBuilder(); info.append("Resource Group: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()); System.out.println(info.toString()); } /** * Print User Assigned MSI info. * * @param resource a User Assigned MSI */ public static void print(Identity resource) { StringBuilder info = new StringBuilder(); info.append("Resource Group: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tService Principal Id: ").append(resource.principalId()) .append("\n\tClient Id: ").append(resource.clientId()) .append("\n\tTenant Id: ").append(resource.tenantId()); System.out.println(info.toString()); } /** * Print virtual machine info. * * @param resource a virtual machine */ public static void print(VirtualMachine resource) { StringBuilder storageProfile = new StringBuilder().append("\n\tStorageProfile: "); if (resource.storageProfile().imageReference() != null) { storageProfile.append("\n\t\tImageReference:"); storageProfile.append("\n\t\t\tPublisher: ").append(resource.storageProfile().imageReference().publisher()); storageProfile.append("\n\t\t\tOffer: ").append(resource.storageProfile().imageReference().offer()); storageProfile.append("\n\t\t\tSKU: ").append(resource.storageProfile().imageReference().sku()); storageProfile.append("\n\t\t\tVersion: ").append(resource.storageProfile().imageReference().version()); } if (resource.storageProfile().osDisk() != null) { storageProfile.append("\n\t\tOSDisk:"); storageProfile.append("\n\t\t\tOSType: ").append(resource.storageProfile().osDisk().osType()); storageProfile.append("\n\t\t\tName: ").append(resource.storageProfile().osDisk().name()); storageProfile.append("\n\t\t\tCaching: 
").append(resource.storageProfile().osDisk().caching()); storageProfile.append("\n\t\t\tCreateOption: ").append(resource.storageProfile().osDisk().createOption()); storageProfile.append("\n\t\t\tDiskSizeGB: ").append(resource.storageProfile().osDisk().diskSizeGB()); if (resource.storageProfile().osDisk().managedDisk() != null) { if (resource.storageProfile().osDisk().managedDisk().diskEncryptionSet() != null) { storageProfile.append("\n\t\t\tDiskEncryptionSet Id: ") .append(resource.storageProfile().osDisk().managedDisk().diskEncryptionSet().id()); } } if (resource.storageProfile().osDisk().image() != null) { storageProfile.append("\n\t\t\tImage Uri: ").append(resource.storageProfile().osDisk().image().uri()); } if (resource.storageProfile().osDisk().vhd() != null) { storageProfile.append("\n\t\t\tVhd Uri: ").append(resource.storageProfile().osDisk().vhd().uri()); } if (resource.storageProfile().osDisk().encryptionSettings() != null) { storageProfile.append("\n\t\t\tEncryptionSettings: "); storageProfile.append("\n\t\t\t\tEnabled: ").append(resource.storageProfile().osDisk().encryptionSettings().enabled()); storageProfile.append("\n\t\t\t\tDiskEncryptionKey Uri: ").append(resource .storageProfile() .osDisk() .encryptionSettings() .diskEncryptionKey().secretUrl()); storageProfile.append("\n\t\t\t\tKeyEncryptionKey Uri: ").append(resource .storageProfile() .osDisk() .encryptionSettings() .keyEncryptionKey().keyUrl()); } } if (resource.storageProfile().dataDisks() != null) { int i = 0; for (DataDisk disk : resource.storageProfile().dataDisks()) { storageProfile.append("\n\t\tDataDisk: storageProfile.append("\n\t\t\tName: ").append(disk.name()); storageProfile.append("\n\t\t\tCaching: ").append(disk.caching()); storageProfile.append("\n\t\t\tCreateOption: ").append(disk.createOption()); storageProfile.append("\n\t\t\tDiskSizeGB: ").append(disk.diskSizeGB()); storageProfile.append("\n\t\t\tLun: ").append(disk.lun()); if (resource.isManagedDiskEnabled()) { if 
(disk.managedDisk() != null) { storageProfile.append("\n\t\t\tManaged Disk Id: ").append(disk.managedDisk().id()); if (disk.managedDisk().diskEncryptionSet() != null) { storageProfile.append("\n\t\t\tDiskEncryptionSet Id: ").append(disk.managedDisk().diskEncryptionSet().id()); } } } else { if (disk.vhd().uri() != null) { storageProfile.append("\n\t\t\tVhd Uri: ").append(disk.vhd().uri()); } } if (disk.image() != null) { storageProfile.append("\n\t\t\tImage Uri: ").append(disk.image().uri()); } } } StringBuilder osProfile = new StringBuilder().append("\n\tOSProfile: "); if (resource.osProfile() != null) { osProfile.append("\n\t\tComputerName:").append(resource.osProfile().computerName()); if (resource.osProfile().windowsConfiguration() != null) { osProfile.append("\n\t\t\tWindowsConfiguration: "); osProfile.append("\n\t\t\t\tProvisionVMAgent: ") .append(resource.osProfile().windowsConfiguration().provisionVMAgent()); osProfile.append("\n\t\t\t\tEnableAutomaticUpdates: ") .append(resource.osProfile().windowsConfiguration().enableAutomaticUpdates()); osProfile.append("\n\t\t\t\tTimeZone: ") .append(resource.osProfile().windowsConfiguration().timeZone()); } if (resource.osProfile().linuxConfiguration() != null) { osProfile.append("\n\t\t\tLinuxConfiguration: "); osProfile.append("\n\t\t\t\tDisablePasswordAuthentication: ") .append(resource.osProfile().linuxConfiguration().disablePasswordAuthentication()); } } else { osProfile.append("null"); } StringBuilder networkProfile = new StringBuilder().append("\n\tNetworkProfile: "); for (String networkInterfaceId : resource.networkInterfaceIds()) { networkProfile.append("\n\t\tId:").append(networkInterfaceId); } StringBuilder extensions = new StringBuilder().append("\n\tExtensions: "); for (Map.Entry<String, VirtualMachineExtension> extensionEntry : resource.listExtensions().entrySet()) { VirtualMachineExtension extension = extensionEntry.getValue(); extensions.append("\n\t\tExtension: ").append(extension.id()) 
.append("\n\t\t\tName: ").append(extension.name()) .append("\n\t\t\tTags: ").append(extension.tags()) .append("\n\t\t\tProvisioningState: ").append(extension.provisioningState()) .append("\n\t\t\tAuto upgrade minor version enabled: ").append(extension.autoUpgradeMinorVersionEnabled()) .append("\n\t\t\tPublisher: ").append(extension.publisherName()) .append("\n\t\t\tType: ").append(extension.typeName()) .append("\n\t\t\tVersion: ").append(extension.versionName()) .append("\n\t\t\tPublic Settings: ").append(extension.publicSettingsAsJsonString()); } StringBuilder msi = new StringBuilder().append("\n\tMSI: "); msi.append("\n\t\t\tMSI enabled:").append(resource.isManagedServiceIdentityEnabled()); msi.append("\n\t\t\tSystem Assigned MSI Active Directory Service Principal Id:").append(resource.systemAssignedManagedServiceIdentityPrincipalId()); msi.append("\n\t\t\tSystem Assigned MSI Active Directory Tenant Id:").append(resource.systemAssignedManagedServiceIdentityTenantId()); StringBuilder zones = new StringBuilder().append("\n\tZones: "); zones.append(resource.availabilityZones()); StringBuilder securityProfile = new StringBuilder().append("\n\tSecurityProfile: "); securityProfile.append("\n\t\t\tSecurity type: ").append(resource.securityType()); securityProfile.append("\n\t\t\tSecure Boot enabled: ").append(resource.isSecureBootEnabled()); securityProfile.append("\n\t\t\tvTPM enabled: ").append(resource.isVTpmEnabled()); System.out.println(new StringBuilder().append("Virtual Machine: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tHardwareProfile: ") .append("\n\t\tSize: ").append(resource.size()) .append(storageProfile) .append(osProfile) .append(networkProfile) .append(extensions) .append(msi) .append(zones) .append(securityProfile) .toString()); } /** * Print availability 
set info. * * @param resource an availability set */ public static void print(AvailabilitySet resource) { System.out.println(new StringBuilder().append("Availability Set: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tFault domain count: ").append(resource.faultDomainCount()) .append("\n\tUpdate domain count: ").append(resource.updateDomainCount()) .toString()); } /** * Print network info. * * @param resource a network * @throws ManagementException Cloud errors */ public static void print(Network resource) { StringBuilder info = new StringBuilder(); info.append("Network: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tAddress spaces: ").append(resource.addressSpaces()) .append("\n\tDNS server IPs: ").append(resource.dnsServerIPs()); for (Subnet subnet : resource.subnets().values()) { info.append("\n\tSubnet: ").append(subnet.name()) .append("\n\t\tAddress prefix: ").append(subnet.addressPrefix()); NetworkSecurityGroup subnetNsg = subnet.getNetworkSecurityGroup(); if (subnetNsg != null) { info.append("\n\t\tNetwork security group ID: ").append(subnetNsg.id()); } RouteTable routeTable = subnet.getRouteTable(); if (routeTable != null) { info.append("\n\tRoute table ID: ").append(routeTable.id()); } Map<ServiceEndpointType, List<Region>> services = subnet.servicesWithAccess(); if (services.size() > 0) { info.append("\n\tServices with access"); for (Map.Entry<ServiceEndpointType, List<Region>> service : services.entrySet()) { info.append("\n\t\tService: ") .append(service.getKey()) .append(" Regions: " + service.getValue() + ""); } } } for (NetworkPeering peering : 
resource.peerings().list()) { info.append("\n\tPeering: ").append(peering.name()) .append("\n\t\tRemote network ID: ").append(peering.remoteNetworkId()) .append("\n\t\tPeering state: ").append(peering.state()) .append("\n\t\tIs traffic forwarded from remote network allowed? ").append(peering.isTrafficForwardingFromRemoteNetworkAllowed()) .append("\n\t\tGateway use: ").append(peering.gatewayUse()); } System.out.println(info.toString()); } /** * Print network interface. * * @param resource a network interface */ public static void print(NetworkInterface resource) { StringBuilder info = new StringBuilder(); info.append("NetworkInterface: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tInternal DNS name label: ").append(resource.internalDnsNameLabel()) .append("\n\tInternal FQDN: ").append(resource.internalFqdn()) .append("\n\tInternal domain name suffix: ").append(resource.internalDomainNameSuffix()) .append("\n\tNetwork security group: ").append(resource.networkSecurityGroupId()) .append("\n\tApplied DNS servers: ").append(resource.appliedDnsServers().toString()) .append("\n\tDNS server IPs: "); for (String dnsServerIp : resource.dnsServers()) { info.append("\n\t\t").append(dnsServerIp); } info.append("\n\tIP forwarding enabled? ").append(resource.isIPForwardingEnabled()) .append("\n\tAccelerated networking enabled? 
").append(resource.isAcceleratedNetworkingEnabled()) .append("\n\tMAC Address:").append(resource.macAddress()) .append("\n\tPrivate IP:").append(resource.primaryPrivateIP()) .append("\n\tPrivate allocation method:").append(resource.primaryPrivateIpAllocationMethod()) .append("\n\tPrimary virtual network ID: ").append(resource.primaryIPConfiguration().networkId()) .append("\n\tPrimary subnet name:").append(resource.primaryIPConfiguration().subnetName()); System.out.println(info.toString()); } /** * Print network security group. * * @param resource a network security group */ public static void print(NetworkSecurityGroup resource) { StringBuilder info = new StringBuilder(); info.append("NSG: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()); for (NetworkSecurityRule rule : resource.securityRules().values()) { info.append("\n\tRule: ").append(rule.name()) .append("\n\t\tAccess: ").append(rule.access()) .append("\n\t\tDirection: ").append(rule.direction()) .append("\n\t\tFrom address: ").append(rule.sourceAddressPrefix()) .append("\n\t\tFrom port range: ").append(rule.sourcePortRange()) .append("\n\t\tTo address: ").append(rule.destinationAddressPrefix()) .append("\n\t\tTo port: ").append(rule.destinationPortRange()) .append("\n\t\tProtocol: ").append(rule.protocol()) .append("\n\t\tPriority: ").append(rule.priority()); } System.out.println(info.toString()); } /** * Print public IP address. 
* * @param resource a public IP address */ public static void print(PublicIpAddress resource) { System.out.println(new StringBuilder().append("Public IP Address: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tIP Address: ").append(resource.ipAddress()) .append("\n\tLeaf domain label: ").append(resource.leafDomainLabel()) .append("\n\tFQDN: ").append(resource.fqdn()) .append("\n\tReverse FQDN: ").append(resource.reverseFqdn()) .append("\n\tIdle timeout (minutes): ").append(resource.idleTimeoutInMinutes()) .append("\n\tIP allocation method: ").append(resource.ipAllocationMethod()) .append("\n\tZones: ").append(resource.availabilityZones()) .toString()); } /** * Print a key vault. * * @param vault the key vault resource */ public static void print(Vault vault) { StringBuilder info = new StringBuilder().append("Key Vault: ").append(vault.id()) .append("Name: ").append(vault.name()) .append("\n\tResource group: ").append(vault.resourceGroupName()) .append("\n\tRegion: ").append(vault.region()) .append("\n\tSku: ").append(vault.sku().name()).append(" - ").append(vault.sku().family()) .append("\n\tVault URI: ").append(vault.vaultUri()) .append("\n\tAccess policies: "); for (AccessPolicy accessPolicy : vault.accessPolicies()) { info.append("\n\t\tIdentity:").append(accessPolicy.objectId()); if (accessPolicy.permissions() != null) { if (accessPolicy.permissions().keys() != null) { info.append("\n\t\tKey permissions: ").append(accessPolicy.permissions().keys().stream().map(KeyPermissions::toString).collect(Collectors.joining(", "))); } if (accessPolicy.permissions().secrets() != null) { info.append("\n\t\tSecret permissions: ").append(accessPolicy.permissions().secrets().stream().map(SecretPermissions::toString).collect(Collectors.joining(", "))); } if 
(accessPolicy.permissions().certificates() != null) { info.append("\n\t\tCertificate permissions: ").append(accessPolicy.permissions().certificates().stream().map(CertificatePermissions::toString).collect(Collectors.joining(", "))); } } } System.out.println(info.toString()); } /** * Print storage account. * * @param storageAccount a storage account */ public static void print(StorageAccount storageAccount) { System.out.println(storageAccount.name() + " created @ " + storageAccount.creationTime()); StringBuilder info = new StringBuilder().append("Storage Account: ").append(storageAccount.id()) .append("Name: ").append(storageAccount.name()) .append("\n\tResource group: ").append(storageAccount.resourceGroupName()) .append("\n\tRegion: ").append(storageAccount.region()) .append("\n\tSKU: ").append(storageAccount.skuType().name().toString()) .append("\n\tAccessTier: ").append(storageAccount.accessTier()) .append("\n\tKind: ").append(storageAccount.kind()); info.append("\n\tNetwork Rule Configuration: ") .append("\n\t\tAllow reading logs from any network: ").append(storageAccount.canReadLogEntriesFromAnyNetwork()) .append("\n\t\tAllow reading metrics from any network: ").append(storageAccount.canReadMetricsFromAnyNetwork()) .append("\n\t\tAllow access from all azure services: ").append(storageAccount.canAccessFromAzureServices()); if (storageAccount.networkSubnetsWithAccess().size() > 0) { info.append("\n\t\tNetwork subnets with access: "); for (String subnetId : storageAccount.networkSubnetsWithAccess()) { info.append("\n\t\t\t").append(subnetId); } } if (storageAccount.ipAddressesWithAccess().size() > 0) { info.append("\n\t\tIP addresses with access: "); for (String ipAddress : storageAccount.ipAddressesWithAccess()) { info.append("\n\t\t\t").append(ipAddress); } } if (storageAccount.ipAddressRangesWithAccess().size() > 0) { info.append("\n\t\tIP address-ranges with access: "); for (String ipAddressRange : storageAccount.ipAddressRangesWithAccess()) { 
info.append("\n\t\t\t").append(ipAddressRange); } } info.append("\n\t\tTraffic allowed from only HTTPS: ").append(storageAccount.innerModel().enableHttpsTrafficOnly()); info.append("\n\tEncryption status: "); info.append("\n\t\tInfrastructure Encryption: ").append(storageAccount.infrastructureEncryptionEnabled() ? "Enabled" : "Disabled"); for (Map.Entry<StorageService, StorageAccountEncryptionStatus> eStatus : storageAccount.encryptionStatuses().entrySet()) { info.append("\n\t\t").append(eStatus.getValue().storageService()).append(": ").append(eStatus.getValue().isEnabled() ? "Enabled" : "Disabled"); } System.out.println(info.toString()); } /** * Print storage account keys. * * @param storageAccountKeys a list of storage account keys */ public static void print(List<StorageAccountKey> storageAccountKeys) { for (int i = 0; i < storageAccountKeys.size(); i++) { StorageAccountKey storageAccountKey = storageAccountKeys.get(i); System.out.println("Key (" + i + ") " + storageAccountKey.keyName() + "=" + storageAccountKey.value()); } } /** * Print Redis Cache. * * @param redisCache a Redis cache. 
*/ public static void print(RedisCache redisCache) { StringBuilder redisInfo = new StringBuilder() .append("Redis Cache Name: ").append(redisCache.name()) .append("\n\tResource group: ").append(redisCache.resourceGroupName()) .append("\n\tRegion: ").append(redisCache.region()) .append("\n\tSKU Name: ").append(redisCache.sku().name()) .append("\n\tSKU Family: ").append(redisCache.sku().family()) .append("\n\tHostname: ").append(redisCache.hostname()) .append("\n\tSSL port: ").append(redisCache.sslPort()) .append("\n\tNon-SSL port (6379) enabled: ").append(redisCache.nonSslPort()); if (redisCache.redisConfiguration() != null && !redisCache.redisConfiguration().isEmpty()) { redisInfo.append("\n\tRedis Configuration:"); for (Map.Entry<String, String> redisConfiguration : redisCache.redisConfiguration().entrySet()) { redisInfo.append("\n\t '").append(redisConfiguration.getKey()) .append("' : '").append(redisConfiguration.getValue()).append("'"); } } if (redisCache.isPremium()) { RedisCachePremium premium = redisCache.asPremium(); List<ScheduleEntry> scheduleEntries = premium.listPatchSchedules(); if (scheduleEntries != null && !scheduleEntries.isEmpty()) { redisInfo.append("\n\tRedis Patch Schedule:"); for (ScheduleEntry schedule : scheduleEntries) { redisInfo.append("\n\t\tDay: '").append(schedule.dayOfWeek()) .append("', start at: '").append(schedule.startHourUtc()) .append("', maintenance window: '").append(schedule.maintenanceWindow()) .append("'"); } } } System.out.println(redisInfo.toString()); } /** * Print Redis Cache access keys. * * @param redisAccessKeys a keys for Redis Cache */ public static void print(RedisAccessKeys redisAccessKeys) { StringBuilder redisKeys = new StringBuilder() .append("Redis Access Keys: ") .append("\n\tPrimary Key: '").append(redisAccessKeys.primaryKey()).append("', ") .append("\n\tSecondary Key: '").append(redisAccessKeys.secondaryKey()).append("', "); System.out.println(redisKeys.toString()); } /** * Print management lock. 
* * @param lock a management lock */ public static void print(ManagementLock lock) { StringBuilder info = new StringBuilder(); info.append("\nLock ID: ").append(lock.id()) .append("\nLocked resource ID: ").append(lock.lockedResourceId()) .append("\nLevel: ").append(lock.level()); System.out.println(info.toString()); } /** * Print load balancer. * * @param resource a load balancer */ public static void print(LoadBalancer resource) { StringBuilder info = new StringBuilder(); info.append("Load balancer: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tBackends: ").append(resource.backends().keySet().toString()); info.append("\n\tPublic IP address IDs: ") .append(resource.publicIpAddressIds().size()); for (String pipId : resource.publicIpAddressIds()) { info.append("\n\t\tPIP id: ").append(pipId); } info.append("\n\tTCP probes: ") .append(resource.tcpProbes().size()); for (LoadBalancerTcpProbe probe : resource.tcpProbes().values()) { info.append("\n\t\tProbe name: ").append(probe.name()) .append("\n\t\t\tPort: ").append(probe.port()) .append("\n\t\t\tInterval in seconds: ").append(probe.intervalInSeconds()) .append("\n\t\t\tRetries before unhealthy: ").append(probe.numberOfProbes()); info.append("\n\t\t\tReferenced from load balancing rules: ") .append(probe.loadBalancingRules().size()); for (LoadBalancingRule rule : probe.loadBalancingRules().values()) { info.append("\n\t\t\t\tName: ").append(rule.name()); } } info.append("\n\tHTTP probes: ") .append(resource.httpProbes().size()); for (LoadBalancerHttpProbe probe : resource.httpProbes().values()) { info.append("\n\t\tProbe name: ").append(probe.name()) .append("\n\t\t\tPort: ").append(probe.port()) .append("\n\t\t\tInterval in seconds: ").append(probe.intervalInSeconds()) .append("\n\t\t\tRetries before unhealthy: 
").append(probe.numberOfProbes()) .append("\n\t\t\tHTTP request path: ").append(probe.requestPath()); info.append("\n\t\t\tReferenced from load balancing rules: ") .append(probe.loadBalancingRules().size()); for (LoadBalancingRule rule : probe.loadBalancingRules().values()) { info.append("\n\t\t\t\tName: ").append(rule.name()); } } info.append("\n\tHTTPS probes: ") .append(resource.httpsProbes().size()); for (LoadBalancerHttpProbe probe : resource.httpsProbes().values()) { info.append("\n\t\tProbe name: ").append(probe.name()) .append("\n\t\t\tPort: ").append(probe.port()) .append("\n\t\t\tInterval in seconds: ").append(probe.intervalInSeconds()) .append("\n\t\t\tRetries before unhealthy: ").append(probe.numberOfProbes()) .append("\n\t\t\tHTTPS request path: ").append(probe.requestPath()); info.append("\n\t\t\tReferenced from load balancing rules: ") .append(probe.loadBalancingRules().size()); for (LoadBalancingRule rule : probe.loadBalancingRules().values()) { info.append("\n\t\t\t\tName: ").append(rule.name()); } } info.append("\n\tLoad balancing rules: ") .append(resource.loadBalancingRules().size()); for (LoadBalancingRule rule : resource.loadBalancingRules().values()) { info.append("\n\t\tLB rule name: ").append(rule.name()) .append("\n\t\t\tProtocol: ").append(rule.protocol()) .append("\n\t\t\tFloating IP enabled? 
").append(rule.floatingIPEnabled()) .append("\n\t\t\tIdle timeout in minutes: ").append(rule.idleTimeoutInMinutes()) .append("\n\t\t\tLoad distribution method: ").append(rule.loadDistribution().toString()); LoadBalancerFrontend frontend = rule.frontend(); info.append("\n\t\t\tFrontend: "); if (frontend != null) { info.append(frontend.name()); } else { info.append("(None)"); } info.append("\n\t\t\tFrontend port: ").append(rule.frontendPort()); LoadBalancerBackend backend = rule.backend(); info.append("\n\t\t\tBackend: "); if (backend != null) { info.append(backend.name()); } else { info.append("(None)"); } info.append("\n\t\t\tBackend port: ").append(rule.backendPort()); LoadBalancerProbe probe = rule.probe(); info.append("\n\t\t\tProbe: "); if (probe == null) { info.append("(None)"); } else { info.append(probe.name()).append(" [").append(probe.protocol().toString()).append("]"); } } info.append("\n\tFrontends: ") .append(resource.frontends().size()); for (LoadBalancerFrontend frontend : resource.frontends().values()) { info.append("\n\t\tFrontend name: ").append(frontend.name()) .append("\n\t\t\tInternet facing: ").append(frontend.isPublic()); if (frontend.isPublic()) { info.append("\n\t\t\tPublic IP Address ID: ").append(((LoadBalancerPublicFrontend) frontend).publicIpAddressId()); } else { info.append("\n\t\t\tVirtual network ID: ").append(((LoadBalancerPrivateFrontend) frontend).networkId()) .append("\n\t\t\tSubnet name: ").append(((LoadBalancerPrivateFrontend) frontend).subnetName()) .append("\n\t\t\tPrivate IP address: ").append(((LoadBalancerPrivateFrontend) frontend).privateIpAddress()) .append("\n\t\t\tPrivate IP allocation method: ").append(((LoadBalancerPrivateFrontend) frontend).privateIpAllocationMethod()); } info.append("\n\t\t\tReferenced inbound NAT pools: ") .append(frontend.inboundNatPools().size()); for (LoadBalancerInboundNatPool pool : frontend.inboundNatPools().values()) { info.append("\n\t\t\t\tName: ").append(pool.name()); } 
info.append("\n\t\t\tReferenced inbound NAT rules: ") .append(frontend.inboundNatRules().size()); for (LoadBalancerInboundNatRule rule : frontend.inboundNatRules().values()) { info.append("\n\t\t\t\tName: ").append(rule.name()); } info.append("\n\t\t\tReferenced load balancing rules: ") .append(frontend.loadBalancingRules().size()); for (LoadBalancingRule rule : frontend.loadBalancingRules().values()) { info.append("\n\t\t\t\tName: ").append(rule.name()); } } info.append("\n\tInbound NAT rules: ") .append(resource.inboundNatRules().size()); for (LoadBalancerInboundNatRule natRule : resource.inboundNatRules().values()) { info.append("\n\t\tInbound NAT rule name: ").append(natRule.name()) .append("\n\t\t\tProtocol: ").append(natRule.protocol().toString()) .append("\n\t\t\tFrontend: ").append(natRule.frontend().name()) .append("\n\t\t\tFrontend port: ").append(natRule.frontendPort()) .append("\n\t\t\tBackend port: ").append(natRule.backendPort()) .append("\n\t\t\tBackend NIC ID: ").append(natRule.backendNetworkInterfaceId()) .append("\n\t\t\tBackend NIC IP config name: ").append(natRule.backendNicIpConfigurationName()) .append("\n\t\t\tFloating IP? 
").append(natRule.floatingIPEnabled()) .append("\n\t\t\tIdle timeout in minutes: ").append(natRule.idleTimeoutInMinutes()); } info.append("\n\tInbound NAT pools: ") .append(resource.inboundNatPools().size()); for (LoadBalancerInboundNatPool natPool : resource.inboundNatPools().values()) { info.append("\n\t\tInbound NAT pool name: ").append(natPool.name()) .append("\n\t\t\tProtocol: ").append(natPool.protocol().toString()) .append("\n\t\t\tFrontend: ").append(natPool.frontend().name()) .append("\n\t\t\tFrontend port range: ") .append(natPool.frontendPortRangeStart()) .append("-") .append(natPool.frontendPortRangeEnd()) .append("\n\t\t\tBackend port: ").append(natPool.backendPort()); } info.append("\n\tBackends: ") .append(resource.backends().size()); for (LoadBalancerBackend backend : resource.backends().values()) { info.append("\n\t\tBackend name: ").append(backend.name()); info.append("\n\t\t\tReferenced NICs: ") .append(backend.backendNicIPConfigurationNames().entrySet().size()); for (Map.Entry<String, String> entry : backend.backendNicIPConfigurationNames().entrySet()) { info.append("\n\t\t\t\tNIC ID: ").append(entry.getKey()) .append(" - IP Config: ").append(entry.getValue()); } Set<String> vmIds = backend.getVirtualMachineIds(); info.append("\n\t\t\tReferenced virtual machine ids: ") .append(vmIds.size()); for (String vmId : vmIds) { info.append("\n\t\t\t\tVM ID: ").append(vmId); } info.append("\n\t\t\tReferenced load balancing rules: ") .append(new ArrayList<String>(backend.loadBalancingRules().keySet())); } System.out.println(info.toString()); } /** * Print app service domain. 
* * @param resource an app service domain */ public static void print(AppServiceDomain resource) { StringBuilder builder = new StringBuilder().append("Domain: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tCreated time: ").append(resource.createdTime()) .append("\n\tExpiration time: ").append(resource.expirationTime()) .append("\n\tContact: "); Contact contact = resource.registrantContact(); if (contact == null) { builder = builder.append("Private"); } else { builder = builder.append("\n\t\tName: ").append(contact.nameFirst() + " " + contact.nameLast()); } builder = builder.append("\n\tName servers: "); for (String nameServer : resource.nameServers()) { builder = builder.append("\n\t\t" + nameServer); } System.out.println(builder.toString()); } /** * Print app service certificate order. * * @param resource an app service certificate order */ public static void print(AppServiceCertificateOrder resource) { StringBuilder builder = new StringBuilder().append("App service certificate order: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tDistinguished name: ").append(resource.distinguishedName()) .append("\n\tProduct type: ").append(resource.productType()) .append("\n\tValid years: ").append(resource.validityInYears()) .append("\n\tStatus: ").append(resource.status()) .append("\n\tIssuance time: ").append(resource.lastCertificateIssuanceTime()) .append("\n\tSigned certificate: ").append(resource.signedCertificate() == null ? null : resource.signedCertificate().thumbprint()); System.out.println(builder.toString()); } /** * Print app service plan. 
* * @param resource an app service plan */ public static void print(AppServicePlan resource) { StringBuilder builder = new StringBuilder().append("App service certificate order: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tPricing tier: ").append(resource.pricingTier()); System.out.println(builder.toString()); } /** * Print a web app. * * @param resource a web app */ public static void print(WebAppBase resource) { StringBuilder builder = new StringBuilder().append("Web app: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tState: ").append(resource.state()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tDefault hostname: ").append(resource.defaultHostname()) .append("\n\tApp service plan: ").append(resource.appServicePlanId()) .append("\n\tHost name bindings: "); for (HostnameBinding binding : resource.getHostnameBindings().values()) { builder = builder.append("\n\t\t" + binding.toString()); } builder = builder.append("\n\tSSL bindings: "); for (HostnameSslState binding : resource.hostnameSslStates().values()) { builder = builder.append("\n\t\t" + binding.name() + ": " + binding.sslState()); if (binding.sslState() != null && binding.sslState() != SslState.DISABLED) { builder = builder.append(" - " + binding.thumbprint()); } } builder = builder.append("\n\tApp settings: "); for (AppSetting setting : resource.getAppSettings().values()) { builder = builder.append("\n\t\t" + setting.key() + ": " + setting.value() + (setting.sticky() ? " - slot setting" : "")); } builder = builder.append("\n\tConnection strings: "); for (ConnectionString conn : resource.getConnectionStrings().values()) { builder = builder.append("\n\t\t" + conn.name() + ": " + conn.value() + " - " + conn.type() + (conn.sticky() ? 
" - slot setting" : "")); } System.out.println(builder.toString()); } /** * Print a web site. * * @param resource a web site */ public static void print(WebSiteBase resource) { StringBuilder builder = new StringBuilder().append("Web app: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tState: ").append(resource.state()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tDefault hostname: ").append(resource.defaultHostname()) .append("\n\tApp service plan: ").append(resource.appServicePlanId()); builder = builder.append("\n\tSSL bindings: "); for (HostnameSslState binding : resource.hostnameSslStates().values()) { builder = builder.append("\n\t\t" + binding.name() + ": " + binding.sslState()); if (binding.sslState() != null && binding.sslState() != SslState.DISABLED) { builder = builder.append(" - " + binding.thumbprint()); } } System.out.println(builder.toString()); } /** * Print a traffic manager profile. 
* * @param profile a traffic manager profile */ public static void print(TrafficManagerProfile profile) { StringBuilder info = new StringBuilder(); info.append("Traffic Manager Profile: ").append(profile.id()) .append("\n\tName: ").append(profile.name()) .append("\n\tResource group: ").append(profile.resourceGroupName()) .append("\n\tRegion: ").append(profile.regionName()) .append("\n\tTags: ").append(profile.tags()) .append("\n\tDNSLabel: ").append(profile.dnsLabel()) .append("\n\tFQDN: ").append(profile.fqdn()) .append("\n\tTTL: ").append(profile.timeToLive()) .append("\n\tEnabled: ").append(profile.isEnabled()) .append("\n\tRoutingMethod: ").append(profile.trafficRoutingMethod()) .append("\n\tMonitor status: ").append(profile.monitorStatus()) .append("\n\tMonitoring port: ").append(profile.monitoringPort()) .append("\n\tMonitoring path: ").append(profile.monitoringPath()); Map<String, TrafficManagerAzureEndpoint> azureEndpoints = profile.azureEndpoints(); if (!azureEndpoints.isEmpty()) { info.append("\n\tAzure endpoints:"); int idx = 1; for (TrafficManagerAzureEndpoint endpoint : azureEndpoints.values()) { info.append("\n\t\tAzure endpoint: .append("\n\t\t\tId: ").append(endpoint.id()) .append("\n\t\t\tType: ").append(endpoint.endpointType()) .append("\n\t\t\tTarget resourceId: ").append(endpoint.targetAzureResourceId()) .append("\n\t\t\tTarget resourceType: ").append(endpoint.targetResourceType()) .append("\n\t\t\tMonitor status: ").append(endpoint.monitorStatus()) .append("\n\t\t\tEnabled: ").append(endpoint.isEnabled()) .append("\n\t\t\tRouting priority: ").append(endpoint.routingPriority()) .append("\n\t\t\tRouting weight: ").append(endpoint.routingWeight()); } } Map<String, TrafficManagerExternalEndpoint> externalEndpoints = profile.externalEndpoints(); if (!externalEndpoints.isEmpty()) { info.append("\n\tExternal endpoints:"); int idx = 1; for (TrafficManagerExternalEndpoint endpoint : externalEndpoints.values()) { info.append("\n\t\tExternal endpoint: 
.append("\n\t\t\tId: ").append(endpoint.id()) .append("\n\t\t\tType: ").append(endpoint.endpointType()) .append("\n\t\t\tFQDN: ").append(endpoint.fqdn()) .append("\n\t\t\tSource Traffic Location: ").append(endpoint.sourceTrafficLocation()) .append("\n\t\t\tMonitor status: ").append(endpoint.monitorStatus()) .append("\n\t\t\tEnabled: ").append(endpoint.isEnabled()) .append("\n\t\t\tRouting priority: ").append(endpoint.routingPriority()) .append("\n\t\t\tRouting weight: ").append(endpoint.routingWeight()); } } Map<String, TrafficManagerNestedProfileEndpoint> nestedProfileEndpoints = profile.nestedProfileEndpoints(); if (!nestedProfileEndpoints.isEmpty()) { info.append("\n\tNested profile endpoints:"); int idx = 1; for (TrafficManagerNestedProfileEndpoint endpoint : nestedProfileEndpoints.values()) { info.append("\n\t\tNested profile endpoint: .append("\n\t\t\tId: ").append(endpoint.id()) .append("\n\t\t\tType: ").append(endpoint.endpointType()) .append("\n\t\t\tNested profileId: ").append(endpoint.nestedProfileId()) .append("\n\t\t\tMinimum child threshold: ").append(endpoint.minimumChildEndpointCount()) .append("\n\t\t\tSource Traffic Location: ").append(endpoint.sourceTrafficLocation()) .append("\n\t\t\tMonitor status: ").append(endpoint.monitorStatus()) .append("\n\t\t\tEnabled: ").append(endpoint.isEnabled()) .append("\n\t\t\tRouting priority: ").append(endpoint.routingPriority()) .append("\n\t\t\tRouting weight: ").append(endpoint.routingWeight()); } } System.out.println(info.toString()); } /** * Print a dns zone. 
* * @param dnsZone a dns zone */ public static void print(DnsZone dnsZone) { StringBuilder info = new StringBuilder(); info.append("DNS Zone: ").append(dnsZone.id()) .append("\n\tName (Top level domain): ").append(dnsZone.name()) .append("\n\tResource group: ").append(dnsZone.resourceGroupName()) .append("\n\tRegion: ").append(dnsZone.regionName()) .append("\n\tTags: ").append(dnsZone.tags()) .append("\n\tName servers:"); for (String nameServer : dnsZone.nameServers()) { info.append("\n\t\t").append(nameServer); } SoaRecordSet soaRecordSet = dnsZone.getSoaRecordSet(); SoaRecord soaRecord = soaRecordSet.record(); info.append("\n\tSOA Record:") .append("\n\t\tHost:").append(soaRecord.host()) .append("\n\t\tEmail:").append(soaRecord.email()) .append("\n\t\tExpire time (seconds):").append(soaRecord.expireTime()) .append("\n\t\tRefresh time (seconds):").append(soaRecord.refreshTime()) .append("\n\t\tRetry time (seconds):").append(soaRecord.retryTime()) .append("\n\t\tNegative response cache ttl (seconds):").append(soaRecord.minimumTtl()) .append("\n\t\tTTL (seconds):").append(soaRecordSet.timeToLive()); PagedIterable<ARecordSet> aRecordSets = dnsZone.aRecordSets().list(); info.append("\n\tA Record sets:"); for (ARecordSet aRecordSet : aRecordSets) { info.append("\n\t\tId: ").append(aRecordSet.id()) .append("\n\t\tName: ").append(aRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(aRecordSet.timeToLive()) .append("\n\t\tIP v4 addresses: "); for (String ipAddress : aRecordSet.ipv4Addresses()) { info.append("\n\t\t\t").append(ipAddress); } } PagedIterable<AaaaRecordSet> aaaaRecordSets = dnsZone.aaaaRecordSets().list(); info.append("\n\tAAAA Record sets:"); for (AaaaRecordSet aaaaRecordSet : aaaaRecordSets) { info.append("\n\t\tId: ").append(aaaaRecordSet.id()) .append("\n\t\tName: ").append(aaaaRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(aaaaRecordSet.timeToLive()) .append("\n\t\tIP v6 addresses: "); for (String ipAddress : 
aaaaRecordSet.ipv6Addresses()) { info.append("\n\t\t\t").append(ipAddress); } } PagedIterable<CnameRecordSet> cnameRecordSets = dnsZone.cNameRecordSets().list(); info.append("\n\tCNAME Record sets:"); for (CnameRecordSet cnameRecordSet : cnameRecordSets) { info.append("\n\t\tId: ").append(cnameRecordSet.id()) .append("\n\t\tName: ").append(cnameRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(cnameRecordSet.timeToLive()) .append("\n\t\tCanonical name: ").append(cnameRecordSet.canonicalName()); } PagedIterable<MxRecordSet> mxRecordSets = dnsZone.mxRecordSets().list(); info.append("\n\tMX Record sets:"); for (MxRecordSet mxRecordSet : mxRecordSets) { info.append("\n\t\tId: ").append(mxRecordSet.id()) .append("\n\t\tName: ").append(mxRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(mxRecordSet.timeToLive()) .append("\n\t\tRecords: "); for (MxRecord mxRecord : mxRecordSet.records()) { info.append("\n\t\t\tExchange server, Preference: ") .append(mxRecord.exchange()) .append(" ") .append(mxRecord.preference()); } } PagedIterable<NsRecordSet> nsRecordSets = dnsZone.nsRecordSets().list(); info.append("\n\tNS Record sets:"); for (NsRecordSet nsRecordSet : nsRecordSets) { info.append("\n\t\tId: ").append(nsRecordSet.id()) .append("\n\t\tName: ").append(nsRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(nsRecordSet.timeToLive()) .append("\n\t\tName servers: "); for (String nameServer : nsRecordSet.nameServers()) { info.append("\n\t\t\t").append(nameServer); } } PagedIterable<PtrRecordSet> ptrRecordSets = dnsZone.ptrRecordSets().list(); info.append("\n\tPTR Record sets:"); for (PtrRecordSet ptrRecordSet : ptrRecordSets) { info.append("\n\t\tId: ").append(ptrRecordSet.id()) .append("\n\t\tName: ").append(ptrRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(ptrRecordSet.timeToLive()) .append("\n\t\tTarget domain names: "); for (String domainNames : ptrRecordSet.targetDomainNames()) { info.append("\n\t\t\t").append(domainNames); } } 
PagedIterable<SrvRecordSet> srvRecordSets = dnsZone.srvRecordSets().list(); info.append("\n\tSRV Record sets:"); for (SrvRecordSet srvRecordSet : srvRecordSets) { info.append("\n\t\tId: ").append(srvRecordSet.id()) .append("\n\t\tName: ").append(srvRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(srvRecordSet.timeToLive()) .append("\n\t\tRecords: "); for (SrvRecord srvRecord : srvRecordSet.records()) { info.append("\n\t\t\tTarget, Port, Priority, Weight: ") .append(srvRecord.target()) .append(", ") .append(srvRecord.port()) .append(", ") .append(srvRecord.priority()) .append(", ") .append(srvRecord.weight()); } } PagedIterable<TxtRecordSet> txtRecordSets = dnsZone.txtRecordSets().list(); info.append("\n\tTXT Record sets:"); for (TxtRecordSet txtRecordSet : txtRecordSets) { info.append("\n\t\tId: ").append(txtRecordSet.id()) .append("\n\t\tName: ").append(txtRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(txtRecordSet.timeToLive()) .append("\n\t\tRecords: "); for (TxtRecord txtRecord : txtRecordSet.records()) { if (txtRecord.value().size() > 0) { info.append("\n\t\t\tValue: ").append(txtRecord.value().get(0)); } } } System.out.println(info.toString()); } /** * Print a private dns zone. 
* * @param privateDnsZone a private dns zone */ public static void print(PrivateDnsZone privateDnsZone) { StringBuilder info = new StringBuilder(); info.append("Private DNS Zone: ").append(privateDnsZone.id()) .append("\n\tName (Top level domain): ").append(privateDnsZone.name()) .append("\n\tResource group: ").append(privateDnsZone.resourceGroupName()) .append("\n\tRegion: ").append(privateDnsZone.regionName()) .append("\n\tTags: ").append(privateDnsZone.tags()) .append("\n\tName servers:"); com.azure.resourcemanager.privatedns.models.SoaRecordSet soaRecordSet = privateDnsZone.getSoaRecordSet(); com.azure.resourcemanager.privatedns.models.SoaRecord soaRecord = soaRecordSet.record(); info.append("\n\tSOA Record:") .append("\n\t\tHost:").append(soaRecord.host()) .append("\n\t\tEmail:").append(soaRecord.email()) .append("\n\t\tExpire time (seconds):").append(soaRecord.expireTime()) .append("\n\t\tRefresh time (seconds):").append(soaRecord.refreshTime()) .append("\n\t\tRetry time (seconds):").append(soaRecord.retryTime()) .append("\n\t\tNegative response cache ttl (seconds):").append(soaRecord.minimumTtl()) .append("\n\t\tTTL (seconds):").append(soaRecordSet.timeToLive()); PagedIterable<com.azure.resourcemanager.privatedns.models.ARecordSet> aRecordSets = privateDnsZone .aRecordSets().list(); info.append("\n\tA Record sets:"); for (com.azure.resourcemanager.privatedns.models.ARecordSet aRecordSet : aRecordSets) { info.append("\n\t\tId: ").append(aRecordSet.id()) .append("\n\t\tName: ").append(aRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(aRecordSet.timeToLive()) .append("\n\t\tIP v4 addresses: "); for (String ipAddress : aRecordSet.ipv4Addresses()) { info.append("\n\t\t\t").append(ipAddress); } } PagedIterable<com.azure.resourcemanager.privatedns.models.AaaaRecordSet> aaaaRecordSets = privateDnsZone .aaaaRecordSets().list(); info.append("\n\tAAAA Record sets:"); for (com.azure.resourcemanager.privatedns.models.AaaaRecordSet aaaaRecordSet : aaaaRecordSets) 
{ info.append("\n\t\tId: ").append(aaaaRecordSet.id()) .append("\n\t\tName: ").append(aaaaRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(aaaaRecordSet.timeToLive()) .append("\n\t\tIP v6 addresses: "); for (String ipAddress : aaaaRecordSet.ipv6Addresses()) { info.append("\n\t\t\t").append(ipAddress); } } PagedIterable<com.azure.resourcemanager.privatedns.models.CnameRecordSet> cnameRecordSets = privateDnsZone.cnameRecordSets().list(); info.append("\n\tCNAME Record sets:"); for (com.azure.resourcemanager.privatedns.models.CnameRecordSet cnameRecordSet : cnameRecordSets) { info.append("\n\t\tId: ").append(cnameRecordSet.id()) .append("\n\t\tName: ").append(cnameRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(cnameRecordSet.timeToLive()) .append("\n\t\tCanonical name: ").append(cnameRecordSet.canonicalName()); } PagedIterable<com.azure.resourcemanager.privatedns.models.MxRecordSet> mxRecordSets = privateDnsZone.mxRecordSets().list(); info.append("\n\tMX Record sets:"); for (com.azure.resourcemanager.privatedns.models.MxRecordSet mxRecordSet : mxRecordSets) { info.append("\n\t\tId: ").append(mxRecordSet.id()) .append("\n\t\tName: ").append(mxRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(mxRecordSet.timeToLive()) .append("\n\t\tRecords: "); for (com.azure.resourcemanager.privatedns.models.MxRecord mxRecord : mxRecordSet.records()) { info.append("\n\t\t\tExchange server, Preference: ") .append(mxRecord.exchange()) .append(" ") .append(mxRecord.preference()); } } PagedIterable<com.azure.resourcemanager.privatedns.models.PtrRecordSet> ptrRecordSets = privateDnsZone .ptrRecordSets().list(); info.append("\n\tPTR Record sets:"); for (com.azure.resourcemanager.privatedns.models.PtrRecordSet ptrRecordSet : ptrRecordSets) { info.append("\n\t\tId: ").append(ptrRecordSet.id()) .append("\n\t\tName: ").append(ptrRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(ptrRecordSet.timeToLive()) .append("\n\t\tTarget domain names: "); for (String 
domainNames : ptrRecordSet.targetDomainNames()) { info.append("\n\t\t\t").append(domainNames); } } PagedIterable<com.azure.resourcemanager.privatedns.models.SrvRecordSet> srvRecordSets = privateDnsZone .srvRecordSets().list(); info.append("\n\tSRV Record sets:"); for (com.azure.resourcemanager.privatedns.models.SrvRecordSet srvRecordSet : srvRecordSets) { info.append("\n\t\tId: ").append(srvRecordSet.id()) .append("\n\t\tName: ").append(srvRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(srvRecordSet.timeToLive()) .append("\n\t\tRecords: "); for (com.azure.resourcemanager.privatedns.models.SrvRecord srvRecord : srvRecordSet.records()) { info.append("\n\t\t\tTarget, Port, Priority, Weight: ") .append(srvRecord.target()) .append(", ") .append(srvRecord.port()) .append(", ") .append(srvRecord.priority()) .append(", ") .append(srvRecord.weight()); } } PagedIterable<com.azure.resourcemanager.privatedns.models.TxtRecordSet> txtRecordSets = privateDnsZone .txtRecordSets().list(); info.append("\n\tTXT Record sets:"); for (com.azure.resourcemanager.privatedns.models.TxtRecordSet txtRecordSet : txtRecordSets) { info.append("\n\t\tId: ").append(txtRecordSet.id()) .append("\n\t\tName: ").append(txtRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(txtRecordSet.timeToLive()) .append("\n\t\tRecords: "); for (com.azure.resourcemanager.privatedns.models.TxtRecord txtRecord : txtRecordSet.records()) { if (txtRecord.value().size() > 0) { info.append("\n\t\t\tValue: ").append(txtRecord.value().get(0)); } } } PagedIterable<VirtualNetworkLink> virtualNetworkLinks = privateDnsZone.virtualNetworkLinks().list(); info.append("\n\tVirtual Network Links:"); for (VirtualNetworkLink virtualNetworkLink : virtualNetworkLinks) { info.append("\n\tId: ").append(virtualNetworkLink.id()) .append("\n\tName: ").append(virtualNetworkLink.name()) .append("\n\tReference of Virtual Network: ").append(virtualNetworkLink.referencedVirtualNetworkId()) .append("\n\tRegistration enabled: 
").append(virtualNetworkLink.isAutoRegistrationEnabled()); } System.out.println(info.toString()); } /** * Print an Azure Container Registry. * * @param azureRegistry an Azure Container Registry */ public static void print(Registry azureRegistry) { StringBuilder info = new StringBuilder(); RegistryCredentials acrCredentials = azureRegistry.getCredentials(); info.append("Azure Container Registry: ").append(azureRegistry.id()) .append("\n\tName: ").append(azureRegistry.name()) .append("\n\tServer Url: ").append(azureRegistry.loginServerUrl()) .append("\n\tUser: ").append(acrCredentials.username()) .append("\n\tFirst Password: ").append(acrCredentials.accessKeys().get(AccessKeyType.PRIMARY)) .append("\n\tSecond Password: ").append(acrCredentials.accessKeys().get(AccessKeyType.SECONDARY)); System.out.println(info.toString()); } /** * Print an Azure Container Service (AKS). * * @param kubernetesCluster a managed container service */ public static void print(KubernetesCluster kubernetesCluster) { StringBuilder info = new StringBuilder(); info.append("Azure Container Service: ").append(kubernetesCluster.id()) .append("\n\tName: ").append(kubernetesCluster.name()) .append("\n\tFQDN: ").append(kubernetesCluster.fqdn()) .append("\n\tDNS prefix label: ").append(kubernetesCluster.dnsPrefix()) .append("\n\t\tWith Agent pool name: ").append(new ArrayList<>(kubernetesCluster.agentPools().keySet()).get(0)) .append("\n\t\tAgent pool count: ").append(new ArrayList<>(kubernetesCluster.agentPools().values()).get(0).count()) .append("\n\t\tAgent pool VM size: ").append(new ArrayList<>(kubernetesCluster.agentPools().values()).get(0).vmSize().toString()) .append("\n\tLinux user name: ").append(kubernetesCluster.linuxRootUsername()) .append("\n\tSSH key: ").append(kubernetesCluster.sshKey()) .append("\n\tService principal client ID: ").append(kubernetesCluster.servicePrincipalClientId()); System.out.println(info.toString()); } /** * Print an Azure Search Service. 
* * @param searchService an Azure Search Service */ public static void print(SearchService searchService) { StringBuilder info = new StringBuilder(); AdminKeys adminKeys = searchService.getAdminKeys(); PagedIterable<QueryKey> queryKeys = searchService.listQueryKeys(); info.append("Azure Search: ").append(searchService.id()) .append("\n\tResource group: ").append(searchService.resourceGroupName()) .append("\n\tRegion: ").append(searchService.region()) .append("\n\tTags: ").append(searchService.tags()) .append("\n\tSku: ").append(searchService.sku().name()) .append("\n\tStatus: ").append(searchService.status()) .append("\n\tProvisioning State: ").append(searchService.provisioningState()) .append("\n\tHosting Mode: ").append(searchService.hostingMode()) .append("\n\tReplicas: ").append(searchService.replicaCount()) .append("\n\tPartitions: ").append(searchService.partitionCount()) .append("\n\tPrimary Admin Key: ").append(adminKeys.primaryKey()) .append("\n\tSecondary Admin Key: ").append(adminKeys.secondaryKey()) .append("\n\tQuery keys:"); for (QueryKey queryKey : queryKeys) { info.append("\n\t\tKey name: ").append(queryKey.name()); info.append("\n\t\t Value: ").append(queryKey.key()); } System.out.println(info.toString()); } /** * Retrieve the secondary service principal client ID. * * @param envSecondaryServicePrincipal an Azure Container Registry * @return a service principal client ID * @throws IOException exception */
public static String getSecondaryServicePrincipalClientID(String envSecondaryServicePrincipal) throws IOException {
    // The credentials file is either a JSON document or a Java properties file;
    // a leading '{' distinguishes the two formats.
    String content = new String(Files.readAllBytes(new File(envSecondaryServicePrincipal).toPath()),
        StandardCharsets.UTF_8).trim();
    HashMap<String, String> auth = new HashMap<>();
    if (content.startsWith("{")) {
        auth = new JacksonAdapter().deserialize(content, auth.getClass(), SerializerEncoding.JSON);
        return auth.get("clientId");
    }
    Properties authSettings = new Properties();
    try (FileInputStream credentialsFileStream = new FileInputStream(new File(envSecondaryServicePrincipal))) {
        authSettings.load(credentialsFileStream);
    }
    return authSettings.getProperty("client");
}

/**
 * Retrieve the secondary service principal secret.
 *
 * @param envSecondaryServicePrincipal path to the secondary service principal credentials file
 * @return a service principal secret
 * @throws IOException exception
 */
public static String getSecondaryServicePrincipalSecret(String envSecondaryServicePrincipal) throws IOException {
    // Same dual-format handling as getSecondaryServicePrincipalClientID, but
    // reading the secret ("clientSecret" in JSON, "key" in properties form).
    String content = new String(Files.readAllBytes(new File(envSecondaryServicePrincipal).toPath()),
        StandardCharsets.UTF_8).trim();
    HashMap<String, String> auth = new HashMap<>();
    if (content.startsWith("{")) {
        auth = new JacksonAdapter().deserialize(content, auth.getClass(), SerializerEncoding.JSON);
        return auth.get("clientSecret");
    }
    Properties authSettings = new Properties();
    try (FileInputStream credentialsFileStream = new FileInputStream(new File(envSecondaryServicePrincipal))) {
        authSettings.load(credentialsFileStream);
    }
    return authSettings.getProperty("key");
}

/**
 * This method creates a certificate for given password.
* * @param certPath location of certificate file * @param pfxPath location of pfx file * @param alias User alias * @param password alias password * @param cnName domain name * @param dnsName dns name in subject alternate name * @throws IOException IO Exception */ public static void createCertificate(String certPath, String pfxPath, String alias, String password, String cnName, String dnsName) throws IOException { if (new File(pfxPath).exists()) { return; } String validityInDays = "3650"; String keyAlg = "RSA"; String sigAlg = "SHA1withRSA"; String keySize = "2048"; String storeType = "pkcs12"; String command = "keytool"; String jdkPath = System.getProperty("java.home"); if (jdkPath != null && !jdkPath.isEmpty()) { jdkPath = jdkPath.concat("\\bin"); if (new File(jdkPath).isDirectory()) { command = String.format("%s%s%s", jdkPath, File.separator, command); } } else { return; } String[] commandArgs = {command, "-genkey", "-alias", alias, "-keystore", pfxPath, "-storepass", password, "-validity", validityInDays, "-keyalg", keyAlg, "-sigalg", sigAlg, "-keysize", keySize, "-storetype", storeType, "-dname", "CN=" + cnName, "-ext", "EKU=1.3.6.1.5.5.7.3.1"}; if (dnsName != null) { List<String> args = new ArrayList<>(Arrays.asList(commandArgs)); args.add("-ext"); args.add("san=dns:" + dnsName); commandArgs = args.toArray(new String[0]); } Utils.cmdInvocation(commandArgs, true); File pfxFile = new File(pfxPath); if (pfxFile.exists()) { String[] certCommandArgs = {command, "-export", "-alias", alias, "-storetype", storeType, "-keystore", pfxPath, "-storepass", password, "-rfc", "-file", certPath}; Utils.cmdInvocation(certCommandArgs, true); File cerFile = new File(pfxPath); if (!cerFile.exists()) { throw new IOException( "Error occurred while creating certificate" + String.join(" ", certCommandArgs)); } } else { throw new IOException("Error occurred while creating certificates" + String.join(" ", commandArgs)); } } /** * This method is used for invoking native commands. 
* * @param command :- command to invoke. * @param ignoreErrorStream : Boolean which controls whether to throw exception or not * based on error stream. * @return result :- depending on the method invocation. * @throws IOException exceptions thrown from the execution */ public static String cmdInvocation(String[] command, boolean ignoreErrorStream) throws IOException { String result = ""; String error = ""; Process process = new ProcessBuilder(command).start(); try ( InputStream inputStream = process.getInputStream(); InputStream errorStream = process.getErrorStream(); BufferedReader br = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8)); BufferedReader ebr = new BufferedReader(new InputStreamReader(errorStream, StandardCharsets.UTF_8)); ) { result = br.readLine(); process.waitFor(); error = ebr.readLine(); if (error != null && (!"".equals(error))) { if (!ignoreErrorStream) { throw new IOException(error, null); } } } catch (Exception e) { throw new RuntimeException("Exception occurred while invoking command", e); } return result; } /** * Prints information for passed SQL Server. * * @param sqlServer sqlServer to be printed */ public static void print(SqlServer sqlServer) { StringBuilder builder = new StringBuilder().append("Sql Server: ").append(sqlServer.id()) .append("Name: ").append(sqlServer.name()) .append("\n\tResource group: ").append(sqlServer.resourceGroupName()) .append("\n\tRegion: ").append(sqlServer.region()) .append("\n\tSqlServer version: ").append(sqlServer.version()) .append("\n\tFully qualified name for Sql Server: ").append(sqlServer.fullyQualifiedDomainName()); System.out.println(builder.toString()); } /** * Prints information for the passed SQL Database. 
* * @param database database to be printed */ public static void print(SqlDatabase database) { StringBuilder builder = new StringBuilder().append("Sql Database: ").append(database.id()) .append("Name: ").append(database.name()) .append("\n\tResource group: ").append(database.resourceGroupName()) .append("\n\tRegion: ").append(database.region()) .append("\n\tSqlServer Name: ").append(database.sqlServerName()) .append("\n\tEdition of SQL database: ").append(database.edition()) .append("\n\tCollation of SQL database: ").append(database.collation()) .append("\n\tCreation date of SQL database: ").append(database.creationDate()) .append("\n\tIs data warehouse: ").append(database.isDataWarehouse()) .append("\n\tRequested service objective of SQL database: ").append(database.requestedServiceObjectiveName()) .append("\n\tName of current service objective of SQL database: ").append(database.currentServiceObjectiveName()) .append("\n\tMax size bytes of SQL database: ").append(database.maxSizeBytes()) .append("\n\tDefault secondary location of SQL database: ").append(database.defaultSecondaryLocation()); System.out.println(builder.toString()); } /** * Prints information for the passed firewall rule. * * @param firewallRule firewall rule to be printed. */ public static void print(SqlFirewallRule firewallRule) { StringBuilder builder = new StringBuilder().append("Sql firewall rule: ").append(firewallRule.id()) .append("Name: ").append(firewallRule.name()) .append("\n\tResource group: ").append(firewallRule.resourceGroupName()) .append("\n\tRegion: ").append(firewallRule.region()) .append("\n\tSqlServer Name: ").append(firewallRule.sqlServerName()) .append("\n\tStart IP Address of the firewall rule: ").append(firewallRule.startIpAddress()) .append("\n\tEnd IP Address of the firewall rule: ").append(firewallRule.endIpAddress()); System.out.println(builder.toString()); } /** * Prints information for the passed virtual network rule. 
* * @param virtualNetworkRule virtual network rule to be printed. */ public static void print(SqlVirtualNetworkRule virtualNetworkRule) { StringBuilder builder = new StringBuilder().append("SQL virtual network rule: ").append(virtualNetworkRule.id()) .append("Name: ").append(virtualNetworkRule.name()) .append("\n\tResource group: ").append(virtualNetworkRule.resourceGroupName()) .append("\n\tSqlServer Name: ").append(virtualNetworkRule.sqlServerName()) .append("\n\tSubnet ID: ").append(virtualNetworkRule.subnetId()) .append("\n\tState: ").append(virtualNetworkRule.state()); System.out.println(builder.toString()); } /** * Prints information for the passed SQL subscription usage metric. * * @param subscriptionUsageMetric metric to be printed. */ public static void print(SqlSubscriptionUsageMetric subscriptionUsageMetric) { StringBuilder builder = new StringBuilder().append("SQL Subscription Usage Metric: ").append(subscriptionUsageMetric.id()) .append("Name: ").append(subscriptionUsageMetric.name()) .append("\n\tDisplay Name: ").append(subscriptionUsageMetric.displayName()) .append("\n\tCurrent Value: ").append(subscriptionUsageMetric.currentValue()) .append("\n\tLimit: ").append(subscriptionUsageMetric.limit()) .append("\n\tUnit: ").append(subscriptionUsageMetric.unit()) .append("\n\tType: ").append(subscriptionUsageMetric.type()); System.out.println(builder.toString()); } /** * Prints information for the passed SQL database usage metric. * * @param dbUsageMetric metric to be printed. 
*/ public static void print(SqlDatabaseUsageMetric dbUsageMetric) { StringBuilder builder = new StringBuilder().append("SQL Database Usage Metric") .append("Name: ").append(dbUsageMetric.name()) .append("\n\tDisplay Name: ").append(dbUsageMetric.displayName()) .append("\n\tCurrent Value: ").append(dbUsageMetric.currentValue()) .append("\n\tLimit: ").append(dbUsageMetric.limit()) .append("\n\tUnit: ").append(dbUsageMetric.unit()); System.out.println(builder.toString()); } /** * Prints information for the passed Failover Group. * * @param failoverGroup the SQL Failover Group to be printed. */ public static void print(SqlFailoverGroup failoverGroup) { StringBuilder builder = new StringBuilder().append("SQL Failover Group: ").append(failoverGroup.id()) .append("Name: ").append(failoverGroup.name()) .append("\n\tResource group: ").append(failoverGroup.resourceGroupName()) .append("\n\tSqlServer Name: ").append(failoverGroup.sqlServerName()) .append("\n\tRead-write endpoint policy: ").append(failoverGroup.readWriteEndpointPolicy()) .append("\n\tData loss grace period: ").append(failoverGroup.readWriteEndpointDataLossGracePeriodMinutes()) .append("\n\tRead-only endpoint policy: ").append(failoverGroup.readOnlyEndpointPolicy()) .append("\n\tReplication state: ").append(failoverGroup.replicationState()) .append("\n\tReplication role: ").append(failoverGroup.replicationRole()); builder.append("\n\tPartner Servers: "); for (PartnerInfo item : failoverGroup.partnerServers()) { builder .append("\n\t\tId: ").append(item.id()) .append("\n\t\tLocation: ").append(item.location()) .append("\n\t\tReplication role: ").append(item.replicationRole()); } builder.append("\n\tDatabases: "); for (String databaseId : failoverGroup.databases()) { builder.append("\n\t\tID: ").append(databaseId); } System.out.println(builder.toString()); } /** * Prints information for the passed SQL server key. * * @param serverKey virtual network rule to be printed. 
*/ public static void print(SqlServerKey serverKey) { StringBuilder builder = new StringBuilder().append("SQL server key: ").append(serverKey.id()) .append("Name: ").append(serverKey.name()) .append("\n\tResource group: ").append(serverKey.resourceGroupName()) .append("\n\tSqlServer Name: ").append(serverKey.sqlServerName()) .append("\n\tRegion: ").append(serverKey.region() != null ? serverKey.region().name() : "") .append("\n\tServer Key Type: ").append(serverKey.serverKeyType()) .append("\n\tServer Key URI: ").append(serverKey.uri()) .append("\n\tServer Key Thumbprint: ").append(serverKey.thumbprint()) .append("\n\tServer Key Creation Date: ").append(serverKey.creationDate() != null ? serverKey.creationDate().toString() : ""); System.out.println(builder.toString()); } /** * Prints information of the elastic pool passed in. * * @param elasticPool elastic pool to be printed */ public static void print(SqlElasticPool elasticPool) { StringBuilder builder = new StringBuilder().append("Sql elastic pool: ").append(elasticPool.id()) .append("Name: ").append(elasticPool.name()) .append("\n\tResource group: ").append(elasticPool.resourceGroupName()) .append("\n\tRegion: ").append(elasticPool.region()) .append("\n\tSqlServer Name: ").append(elasticPool.sqlServerName()) .append("\n\tEdition of elastic pool: ").append(elasticPool.edition()) .append("\n\tTotal number of DTUs in the elastic pool: ").append(elasticPool.dtu()) .append("\n\tMaximum DTUs a database can get in elastic pool: ").append(elasticPool.databaseDtuMax()) .append("\n\tMinimum DTUs a database is guaranteed in elastic pool: ").append(elasticPool.databaseDtuMin()) .append("\n\tCreation date for the elastic pool: ").append(elasticPool.creationDate()) .append("\n\tState of the elastic pool: ").append(elasticPool.state()) .append("\n\tStorage capacity in MBs for the elastic pool: ").append(elasticPool.storageCapacity()); System.out.println(builder.toString()); } /** * Prints information of the elastic pool 
activity. * * @param elasticPoolActivity elastic pool activity to be printed */ public static void print(ElasticPoolActivity elasticPoolActivity) { StringBuilder builder = new StringBuilder().append("Sql elastic pool activity: ").append(elasticPoolActivity.id()) .append("Name: ").append(elasticPoolActivity.name()) .append("\n\tResource group: ").append(elasticPoolActivity.resourceGroupName()) .append("\n\tState: ").append(elasticPoolActivity.state()) .append("\n\tElastic pool name: ").append(elasticPoolActivity.elasticPoolName()) .append("\n\tStart time of activity: ").append(elasticPoolActivity.startTime()) .append("\n\tEnd time of activity: ").append(elasticPoolActivity.endTime()) .append("\n\tError code of activity: ").append(elasticPoolActivity.errorCode()) .append("\n\tError message of activity: ").append(elasticPoolActivity.errorMessage()) .append("\n\tError severity of activity: ").append(elasticPoolActivity.errorSeverity()) .append("\n\tOperation: ").append(elasticPoolActivity.operation()) .append("\n\tCompleted percentage of activity: ").append(elasticPoolActivity.percentComplete()); System.out.println(builder.toString()); } /** * Print an application gateway. * * @param resource an application gateway */ public static void print(ApplicationGateway resource) { StringBuilder info = new StringBuilder(); info.append("Application gateway: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tSKU: ").append(resource.sku().toString()) .append("\n\tOperational state: ").append(resource.operationalState()) .append("\n\tInternet-facing? ").append(resource.isPublic()) .append("\n\tInternal? 
").append(resource.isPrivate()) .append("\n\tDefault private IP address: ").append(resource.privateIpAddress()) .append("\n\tPrivate IP address allocation method: ").append(resource.privateIpAllocationMethod()) .append("\n\tDisabled SSL protocols: ").append(resource.disabledSslProtocols().toString()); Map<String, ApplicationGatewayIpConfiguration> ipConfigs = resource.ipConfigurations(); info.append("\n\tIP configurations: ").append(ipConfigs.size()); for (ApplicationGatewayIpConfiguration ipConfig : ipConfigs.values()) { info.append("\n\t\tName: ").append(ipConfig.name()) .append("\n\t\t\tNetwork id: ").append(ipConfig.networkId()) .append("\n\t\t\tSubnet name: ").append(ipConfig.subnetName()); } Map<String, ApplicationGatewayFrontend> frontends = resource.frontends(); info.append("\n\tFrontends: ").append(frontends.size()); for (ApplicationGatewayFrontend frontend : frontends.values()) { info.append("\n\t\tName: ").append(frontend.name()) .append("\n\t\t\tPublic? ").append(frontend.isPublic()); if (frontend.isPublic()) { info.append("\n\t\t\tPublic IP address ID: ").append(frontend.publicIpAddressId()); } if (frontend.isPrivate()) { info.append("\n\t\t\tPrivate IP address: ").append(frontend.privateIpAddress()) .append("\n\t\t\tPrivate IP allocation method: ").append(frontend.privateIpAllocationMethod()) .append("\n\t\t\tSubnet name: ").append(frontend.subnetName()) .append("\n\t\t\tVirtual network ID: ").append(frontend.networkId()); } } Map<String, ApplicationGatewayBackend> backends = resource.backends(); info.append("\n\tBackends: ").append(backends.size()); for (ApplicationGatewayBackend backend : backends.values()) { info.append("\n\t\tName: ").append(backend.name()) .append("\n\t\t\tAssociated NIC IP configuration IDs: ").append(backend.backendNicIPConfigurationNames().keySet()); Collection<ApplicationGatewayBackendAddress> addresses = backend.addresses(); info.append("\n\t\t\tAddresses: ").append(addresses.size()); for (ApplicationGatewayBackendAddress 
address : addresses) { info.append("\n\t\t\t\tFQDN: ").append(address.fqdn()) .append("\n\t\t\t\tIP: ").append(address.ipAddress()); } } Map<String, ApplicationGatewayBackendHttpConfiguration> httpConfigs = resource.backendHttpConfigurations(); info.append("\n\tHTTP Configurations: ").append(httpConfigs.size()); for (ApplicationGatewayBackendHttpConfiguration httpConfig : httpConfigs.values()) { info.append("\n\t\tName: ").append(httpConfig.name()) .append("\n\t\t\tCookie based affinity: ").append(httpConfig.cookieBasedAffinity()) .append("\n\t\t\tPort: ").append(httpConfig.port()) .append("\n\t\t\tRequest timeout in seconds: ").append(httpConfig.requestTimeout()) .append("\n\t\t\tProtocol: ").append(httpConfig.protocol()) .append("\n\t\tHost header: ").append(httpConfig.hostHeader()) .append("\n\t\tHost header comes from backend? ").append(httpConfig.isHostHeaderFromBackend()) .append("\n\t\tConnection draining timeout in seconds: ").append(httpConfig.connectionDrainingTimeoutInSeconds()) .append("\n\t\tAffinity cookie name: ").append(httpConfig.affinityCookieName()) .append("\n\t\tPath: ").append(httpConfig.path()); ApplicationGatewayProbe probe = httpConfig.probe(); if (probe != null) { info.append("\n\t\tProbe: " + probe.name()); } info.append("\n\t\tIs probe enabled? 
").append(httpConfig.isProbeEnabled()); } Map<String, ApplicationGatewaySslCertificate> sslCerts = resource.sslCertificates(); info.append("\n\tSSL certificates: ").append(sslCerts.size()); for (ApplicationGatewaySslCertificate cert : sslCerts.values()) { info.append("\n\t\tName: ").append(cert.name()) .append("\n\t\t\tCert data: ").append(cert.publicData()); } Map<String, ApplicationGatewayRedirectConfiguration> redirects = resource.redirectConfigurations(); info.append("\n\tRedirect configurations: ").append(redirects.size()); for (ApplicationGatewayRedirectConfiguration redirect : redirects.values()) { info.append("\n\t\tName: ").append(redirect.name()) .append("\n\t\tTarget URL: ").append(redirect.type()) .append("\n\t\tTarget URL: ").append(redirect.targetUrl()) .append("\n\t\tTarget listener: ").append(redirect.targetListener() != null ? redirect.targetListener().name() : null) .append("\n\t\tIs path included? ").append(redirect.isPathIncluded()) .append("\n\t\tIs query string included? ").append(redirect.isQueryStringIncluded()) .append("\n\t\tReferencing request routing rules: ").append(redirect.requestRoutingRules().values()); } Map<String, ApplicationGatewayListener> listeners = resource.listeners(); info.append("\n\tHTTP listeners: ").append(listeners.size()); for (ApplicationGatewayListener listener : listeners.values()) { info.append("\n\t\tName: ").append(listener.name()) .append("\n\t\t\tHost name: ").append(listener.hostname()) .append("\n\t\t\tServer name indication required? 
").append(listener.requiresServerNameIndication()) .append("\n\t\t\tAssociated frontend name: ").append(listener.frontend().name()) .append("\n\t\t\tFrontend port name: ").append(listener.frontendPortName()) .append("\n\t\t\tFrontend port number: ").append(listener.frontendPortNumber()) .append("\n\t\t\tProtocol: ").append(listener.protocol().toString()); if (listener.sslCertificate() != null) { info.append("\n\t\t\tAssociated SSL certificate: ").append(listener.sslCertificate().name()); } } Map<String, ApplicationGatewayProbe> probes = resource.probes(); info.append("\n\tProbes: ").append(probes.size()); for (ApplicationGatewayProbe probe : probes.values()) { info.append("\n\t\tName: ").append(probe.name()) .append("\n\t\tProtocol:").append(probe.protocol().toString()) .append("\n\t\tInterval in seconds: ").append(probe.timeBetweenProbesInSeconds()) .append("\n\t\tRetries: ").append(probe.retriesBeforeUnhealthy()) .append("\n\t\tTimeout: ").append(probe.timeoutInSeconds()) .append("\n\t\tHost: ").append(probe.host()) .append("\n\t\tHealthy HTTP response status code ranges: ").append(probe.healthyHttpResponseStatusCodeRanges()) .append("\n\t\tHealthy HTTP response body contents: ").append(probe.healthyHttpResponseBodyContents()); } Map<String, ApplicationGatewayRequestRoutingRule> rules = resource.requestRoutingRules(); info.append("\n\tRequest routing rules: ").append(rules.size()); for (ApplicationGatewayRequestRoutingRule rule : rules.values()) { info.append("\n\t\tName: ").append(rule.name()) .append("\n\t\tType: ").append(rule.ruleType()) .append("\n\t\tPublic IP address ID: ").append(rule.publicIpAddressId()) .append("\n\t\tHost name: ").append(rule.hostname()) .append("\n\t\tServer name indication required? 
").append(rule.requiresServerNameIndication()) .append("\n\t\tFrontend port: ").append(rule.frontendPort()) .append("\n\t\tFrontend protocol: ").append(rule.frontendProtocol().toString()) .append("\n\t\tBackend port: ").append(rule.backendPort()) .append("\n\t\tCookie based affinity enabled? ").append(rule.cookieBasedAffinity()) .append("\n\t\tRedirect configuration: ").append(rule.redirectConfiguration() != null ? rule.redirectConfiguration().name() : "(none)"); Collection<ApplicationGatewayBackendAddress> addresses = rule.backendAddresses(); info.append("\n\t\t\tBackend addresses: ").append(addresses.size()); for (ApplicationGatewayBackendAddress address : addresses) { info.append("\n\t\t\t\t") .append(address.fqdn()) .append(" [").append(address.ipAddress()).append("]"); } info.append("\n\t\t\tSSL certificate name: "); ApplicationGatewaySslCertificate cert = rule.sslCertificate(); if (cert == null) { info.append("(None)"); } else { info.append(cert.name()); } info.append("\n\t\t\tAssociated backend address pool: "); ApplicationGatewayBackend backend = rule.backend(); if (backend == null) { info.append("(None)"); } else { info.append(backend.name()); } info.append("\n\t\t\tAssociated backend HTTP settings configuration: "); ApplicationGatewayBackendHttpConfiguration config = rule.backendHttpConfiguration(); if (config == null) { info.append("(None)"); } else { info.append(config.name()); } info.append("\n\t\t\tAssociated frontend listener: "); ApplicationGatewayListener listener = rule.listener(); if (listener == null) { info.append("(None)"); } else { info.append(config.name()); } } System.out.println(info.toString()); } /** * Prints information of a virtual machine custom image. 
* * @param image the image */ public static void print(VirtualMachineCustomImage image) { StringBuilder builder = new StringBuilder().append("Virtual machine custom image: ").append(image.id()) .append("Name: ").append(image.name()) .append("\n\tResource group: ").append(image.resourceGroupName()) .append("\n\tCreated from virtual machine: ").append(image.sourceVirtualMachineId()); builder.append("\n\tOS disk image: ") .append("\n\t\tOperating system: ").append(image.osDiskImage().osType()) .append("\n\t\tOperating system state: ").append(image.osDiskImage().osState()) .append("\n\t\tCaching: ").append(image.osDiskImage().caching()) .append("\n\t\tSize (GB): ").append(image.osDiskImage().diskSizeGB()); if (image.isCreatedFromVirtualMachine()) { builder.append("\n\t\tSource virtual machine: ").append(image.sourceVirtualMachineId()); } if (image.osDiskImage().managedDisk() != null) { builder.append("\n\t\tSource managed disk: ").append(image.osDiskImage().managedDisk().id()); } if (image.osDiskImage().snapshot() != null) { builder.append("\n\t\tSource snapshot: ").append(image.osDiskImage().snapshot().id()); } if (image.osDiskImage().blobUri() != null) { builder.append("\n\t\tSource un-managed vhd: ").append(image.osDiskImage().blobUri()); } if (image.dataDiskImages() != null) { for (ImageDataDisk diskImage : image.dataDiskImages().values()) { builder.append("\n\tDisk Image (Lun) .append("\n\t\tCaching: ").append(diskImage.caching()) .append("\n\t\tSize (GB): ").append(diskImage.diskSizeGB()); if (image.isCreatedFromVirtualMachine()) { builder.append("\n\t\tSource virtual machine: ").append(image.sourceVirtualMachineId()); } if (diskImage.managedDisk() != null) { builder.append("\n\t\tSource managed disk: ").append(diskImage.managedDisk().id()); } if (diskImage.snapshot() != null) { builder.append("\n\t\tSource snapshot: ").append(diskImage.snapshot().id()); } if (diskImage.blobUri() != null) { builder.append("\n\t\tSource un-managed vhd: 
").append(diskImage.blobUri()); } } } System.out.println(builder.toString()); } /** * Uploads a file to an Azure app service for Web App. * * @param profile the publishing profile for the app service. * @param fileName the name of the file on server * @param file the local file */ public static void uploadFileViaFtp(PublishingProfile profile, String fileName, InputStream file) { String path = "./site/wwwroot/webapps"; uploadFileViaFtp(profile, fileName, file, path); } /** * Uploads a file to an Azure app service for Function App. * * @param profile the publishing profile for the app service. * @param fileName the name of the file on server * @param file the local file */ public static void uploadFileForFunctionViaFtp(PublishingProfile profile, String fileName, InputStream file) { String path = "./site/wwwroot"; uploadFileViaFtp(profile, fileName, file, path); } private static void uploadFileViaFtp(PublishingProfile profile, String fileName, InputStream file, String path) { FTPClient ftpClient = new FTPClient(); String[] ftpUrlSegments = profile.ftpUrl().split("/", 2); String server = ftpUrlSegments[0]; if (fileName.contains("/")) { int lastslash = fileName.lastIndexOf('/'); path = path + "/" + fileName.substring(0, lastslash); fileName = fileName.substring(lastslash + 1); } try { ftpClient.connect(server); ftpClient.enterLocalPassiveMode(); ftpClient.login(profile.ftpUsername(), profile.ftpPassword()); ftpClient.setFileType(FTP.BINARY_FILE_TYPE); for (String segment : path.split("/")) { if (!ftpClient.changeWorkingDirectory(segment)) { ftpClient.makeDirectory(segment); ftpClient.changeWorkingDirectory(segment); } } ftpClient.storeFile(fileName, file); ftpClient.disconnect(); } catch (IOException e) { e.printStackTrace(); } } /** * Print service bus namespace info. 
 *
 * @param serviceBusNamespace a service bus namespace
 */
public static void print(ServiceBusNamespace serviceBusNamespace) {
    // Namespace properties plus a nested stanza for its SKU.
    StringBuilder builder = new StringBuilder()
        .append("Service bus Namespace: ").append(serviceBusNamespace.id())
        .append("\n\tName: ").append(serviceBusNamespace.name())
        .append("\n\tRegion: ").append(serviceBusNamespace.regionName())
        .append("\n\tResourceGroupName: ").append(serviceBusNamespace.resourceGroupName())
        .append("\n\tCreatedAt: ").append(serviceBusNamespace.createdAt())
        .append("\n\tUpdatedAt: ").append(serviceBusNamespace.updatedAt())
        .append("\n\tDnsLabel: ").append(serviceBusNamespace.dnsLabel())
        .append("\n\tFQDN: ").append(serviceBusNamespace.fqdn())
        .append("\n\tSku: ")
        .append("\n\t\tCapacity: ").append(serviceBusNamespace.sku().capacity())
        .append("\n\t\tSkuName: ").append(serviceBusNamespace.sku().name())
        .append("\n\t\tTier: ").append(serviceBusNamespace.sku().tier());
    System.out.println(builder.toString());
}

/**
 * Print service bus queue info.
 *
 * @param queue a service bus queue
 */
public static void print(Queue queue) {
    // Exhaustive dump of queue runtime and configuration properties.
    StringBuilder builder = new StringBuilder()
        .append("Service bus Queue: ").append(queue.id())
        .append("\n\tName: ").append(queue.name())
        .append("\n\tResourceGroupName: ").append(queue.resourceGroupName())
        .append("\n\tCreatedAt: ").append(queue.createdAt())
        .append("\n\tUpdatedAt: ").append(queue.updatedAt())
        .append("\n\tAccessedAt: ").append(queue.accessedAt())
        .append("\n\tActiveMessageCount: ").append(queue.activeMessageCount())
        .append("\n\tCurrentSizeInBytes: ").append(queue.currentSizeInBytes())
        .append("\n\tDeadLetterMessageCount: ").append(queue.deadLetterMessageCount())
        .append("\n\tDefaultMessageTtlDuration: ").append(queue.defaultMessageTtlDuration())
        .append("\n\tDuplicateMessageDetectionHistoryDuration: ").append(queue.duplicateMessageDetectionHistoryDuration())
        .append("\n\tIsBatchedOperationsEnabled: ").append(queue.isBatchedOperationsEnabled())
        .append("\n\tIsDeadLetteringEnabledForExpiredMessages: ").append(queue.isDeadLetteringEnabledForExpiredMessages())
        .append("\n\tIsDuplicateDetectionEnabled: ").append(queue.isDuplicateDetectionEnabled())
        .append("\n\tIsExpressEnabled: ").append(queue.isExpressEnabled())
        .append("\n\tIsPartitioningEnabled: ").append(queue.isPartitioningEnabled())
        .append("\n\tIsSessionEnabled: ").append(queue.isSessionEnabled())
        .append("\n\tDeleteOnIdleDurationInMinutes: ").append(queue.deleteOnIdleDurationInMinutes())
        .append("\n\tMaxDeliveryCountBeforeDeadLetteringMessage: ").append(queue.maxDeliveryCountBeforeDeadLetteringMessage())
        .append("\n\tMaxSizeInMB: ").append(queue.maxSizeInMB())
        .append("\n\tMessageCount: ").append(queue.messageCount())
        .append("\n\tScheduledMessageCount: ").append(queue.scheduledMessageCount())
        .append("\n\tStatus: ").append(queue.status())
        .append("\n\tTransferMessageCount: ").append(queue.transferMessageCount())
        .append("\n\tLockDurationInSeconds: ").append(queue.lockDurationInSeconds())
        .append("\n\tTransferDeadLetterMessageCount: ").append(queue.transferDeadLetterMessageCount());
    System.out.println(builder.toString());
}

/**
 * Print service bus queue authorization keys info.
 *
 * @param queueAuthorizationRule a service bus queue authorization keys
 */
public static void print(QueueAuthorizationRule queueAuthorizationRule) {
    StringBuilder builder = new StringBuilder()
        .append("Service bus queue authorization rule: ").append(queueAuthorizationRule.id())
        .append("\n\tName: ").append(queueAuthorizationRule.name())
        .append("\n\tResourceGroupName: ").append(queueAuthorizationRule.resourceGroupName())
        .append("\n\tNamespace Name: ").append(queueAuthorizationRule.namespaceName())
        .append("\n\tQueue Name: ").append(queueAuthorizationRule.queueName());
    // One entry per access right granted by the rule.
    List<com.azure.resourcemanager.servicebus.models.AccessRights> rights = queueAuthorizationRule.rights();
    builder.append("\n\tNumber of access rights in queue: ").append(rights.size());
    for (com.azure.resourcemanager.servicebus.models.AccessRights right : rights) {
        builder.append("\n\t\tAccessRight: ")
            .append("\n\t\t\tName :").append(right.name());
    }
    System.out.println(builder.toString());
}

/**
 * Print service bus namespace authorization keys info.
 *
 * @param keys a service bus namespace authorization keys
 */
public static void print(AuthorizationKeys keys) {
    // NOTE: prints key material to stdout — acceptable for a sample, never for production.
    StringBuilder builder = new StringBuilder()
        .append("Authorization keys: ")
        .append("\n\tPrimaryKey: ").append(keys.primaryKey())
        .append("\n\tPrimaryConnectionString: ").append(keys.primaryConnectionString())
        .append("\n\tSecondaryKey: ").append(keys.secondaryKey())
        .append("\n\tSecondaryConnectionString: ").append(keys.secondaryConnectionString());
    System.out.println(builder.toString());
}

/**
 * Print service bus namespace authorization rule info.
* * @param namespaceAuthorizationRule a service bus namespace authorization rule */ public static void print(NamespaceAuthorizationRule namespaceAuthorizationRule) { StringBuilder builder = new StringBuilder() .append("Service bus queue authorization rule: ").append(namespaceAuthorizationRule.id()) .append("\n\tName: ").append(namespaceAuthorizationRule.name()) .append("\n\tResourceGroupName: ").append(namespaceAuthorizationRule.resourceGroupName()) .append("\n\tNamespace Name: ").append(namespaceAuthorizationRule.namespaceName()); List<com.azure.resourcemanager.servicebus.models.AccessRights> rights = namespaceAuthorizationRule.rights(); builder.append("\n\tNumber of access rights in queue: ").append(rights.size()); for (com.azure.resourcemanager.servicebus.models.AccessRights right : rights) { builder.append("\n\t\tAccessRight: ") .append("\n\t\t\tName :").append(right.name()); } System.out.println(builder.toString()); } /** * Print service bus topic info. * * @param topic a service bus topic */ public static void print(Topic topic) { StringBuilder builder = new StringBuilder() .append("Service bus topic: ").append(topic.id()) .append("\n\tName: ").append(topic.name()) .append("\n\tResourceGroupName: ").append(topic.resourceGroupName()) .append("\n\tCreatedAt: ").append(topic.createdAt()) .append("\n\tUpdatedAt: ").append(topic.updatedAt()) .append("\n\tAccessedAt: ").append(topic.accessedAt()) .append("\n\tActiveMessageCount: ").append(topic.activeMessageCount()) .append("\n\tCurrentSizeInBytes: ").append(topic.currentSizeInBytes()) .append("\n\tDeadLetterMessageCount: ").append(topic.deadLetterMessageCount()) .append("\n\tDefaultMessageTtlDuration: ").append(topic.defaultMessageTtlDuration()) .append("\n\tDuplicateMessageDetectionHistoryDuration: ").append(topic.duplicateMessageDetectionHistoryDuration()) .append("\n\tIsBatchedOperationsEnabled: ").append(topic.isBatchedOperationsEnabled()) .append("\n\tIsDuplicateDetectionEnabled: 
").append(topic.isDuplicateDetectionEnabled()) .append("\n\tIsExpressEnabled: ").append(topic.isExpressEnabled()) .append("\n\tIsPartitioningEnabled: ").append(topic.isPartitioningEnabled()) .append("\n\tDeleteOnIdleDurationInMinutes: ").append(topic.deleteOnIdleDurationInMinutes()) .append("\n\tMaxSizeInMB: ").append(topic.maxSizeInMB()) .append("\n\tScheduledMessageCount: ").append(topic.scheduledMessageCount()) .append("\n\tStatus: ").append(topic.status()) .append("\n\tTransferMessageCount: ").append(topic.transferMessageCount()) .append("\n\tSubscriptionCount: ").append(topic.subscriptionCount()) .append("\n\tTransferDeadLetterMessageCount: ").append(topic.transferDeadLetterMessageCount()); System.out.println(builder.toString()); } /** * Print service bus subscription info. * * @param serviceBusSubscription a service bus subscription */ public static void print(ServiceBusSubscription serviceBusSubscription) { StringBuilder builder = new StringBuilder() .append("Service bus subscription: ").append(serviceBusSubscription.id()) .append("\n\tName: ").append(serviceBusSubscription.name()) .append("\n\tResourceGroupName: ").append(serviceBusSubscription.resourceGroupName()) .append("\n\tCreatedAt: ").append(serviceBusSubscription.createdAt()) .append("\n\tUpdatedAt: ").append(serviceBusSubscription.updatedAt()) .append("\n\tAccessedAt: ").append(serviceBusSubscription.accessedAt()) .append("\n\tActiveMessageCount: ").append(serviceBusSubscription.activeMessageCount()) .append("\n\tDeadLetterMessageCount: ").append(serviceBusSubscription.deadLetterMessageCount()) .append("\n\tDefaultMessageTtlDuration: ").append(serviceBusSubscription.defaultMessageTtlDuration()) .append("\n\tIsBatchedOperationsEnabled: ").append(serviceBusSubscription.isBatchedOperationsEnabled()) .append("\n\tDeleteOnIdleDurationInMinutes: ").append(serviceBusSubscription.deleteOnIdleDurationInMinutes()) .append("\n\tScheduledMessageCount: ").append(serviceBusSubscription.scheduledMessageCount()) 
.append("\n\tStatus: ").append(serviceBusSubscription.status()) .append("\n\tTransferMessageCount: ").append(serviceBusSubscription.transferMessageCount()) .append("\n\tIsDeadLetteringEnabledForExpiredMessages: ").append(serviceBusSubscription.isDeadLetteringEnabledForExpiredMessages()) .append("\n\tIsSessionEnabled: ").append(serviceBusSubscription.isSessionEnabled()) .append("\n\tLockDurationInSeconds: ").append(serviceBusSubscription.lockDurationInSeconds()) .append("\n\tMaxDeliveryCountBeforeDeadLetteringMessage: ").append(serviceBusSubscription.maxDeliveryCountBeforeDeadLetteringMessage()) .append("\n\tIsDeadLetteringEnabledForFilterEvaluationFailedMessages: ").append(serviceBusSubscription.isDeadLetteringEnabledForFilterEvaluationFailedMessages()) .append("\n\tTransferMessageCount: ").append(serviceBusSubscription.transferMessageCount()) .append("\n\tTransferDeadLetterMessageCount: ").append(serviceBusSubscription.transferDeadLetterMessageCount()); System.out.println(builder.toString()); } /** * Print topic Authorization Rule info. 
* * @param topicAuthorizationRule a topic Authorization Rule */ public static void print(TopicAuthorizationRule topicAuthorizationRule) { StringBuilder builder = new StringBuilder() .append("Service bus topic authorization rule: ").append(topicAuthorizationRule.id()) .append("\n\tName: ").append(topicAuthorizationRule.name()) .append("\n\tResourceGroupName: ").append(topicAuthorizationRule.resourceGroupName()) .append("\n\tNamespace Name: ").append(topicAuthorizationRule.namespaceName()) .append("\n\tTopic Name: ").append(topicAuthorizationRule.topicName()); List<com.azure.resourcemanager.servicebus.models.AccessRights> rights = topicAuthorizationRule.rights(); builder.append("\n\tNumber of access rights in queue: ").append(rights.size()); for (com.azure.resourcemanager.servicebus.models.AccessRights right : rights) { builder.append("\n\t\tAccessRight: ") .append("\n\t\t\tName :").append(right.name()); } System.out.println(builder.toString()); } /** * Print CosmosDB info. * * @param cosmosDBAccount a CosmosDB */ public static void print(CosmosDBAccount cosmosDBAccount) { StringBuilder builder = new StringBuilder() .append("CosmosDB: ").append(cosmosDBAccount.id()) .append("\n\tName: ").append(cosmosDBAccount.name()) .append("\n\tResourceGroupName: ").append(cosmosDBAccount.resourceGroupName()) .append("\n\tKind: ").append(cosmosDBAccount.kind().toString()) .append("\n\tDefault consistency level: ").append(cosmosDBAccount.consistencyPolicy().defaultConsistencyLevel()) .append("\n\tIP range filter: ").append(cosmosDBAccount.ipRangeFilter()); DatabaseAccountListKeysResult keys = cosmosDBAccount.listKeys(); DatabaseAccountListReadOnlyKeysResult readOnlyKeys = cosmosDBAccount.listReadOnlyKeys(); builder .append("\n\tPrimary Master Key: ").append(keys.primaryMasterKey()) .append("\n\tSecondary Master Key: ").append(keys.secondaryMasterKey()) .append("\n\tPrimary Read-Only Key: ").append(readOnlyKeys.primaryReadonlyMasterKey()) .append("\n\tSecondary Read-Only Key: 
").append(readOnlyKeys.secondaryReadonlyMasterKey()); for (Location writeReplica : cosmosDBAccount.writableReplications()) { builder.append("\n\t\tWrite replication: ") .append("\n\t\t\tName :").append(writeReplica.locationName()); } builder.append("\n\tNumber of read replications: ").append(cosmosDBAccount.readableReplications().size()); for (Location readReplica : cosmosDBAccount.readableReplications()) { builder.append("\n\t\tRead replication: ") .append("\n\t\t\tName :").append(readReplica.locationName()); } } /** * Print Active Directory User info. * * @param user active directory user */ public static void print(ActiveDirectoryUser user) { StringBuilder builder = new StringBuilder() .append("Active Directory User: ").append(user.id()) .append("\n\tName: ").append(user.name()) .append("\n\tMail: ").append(user.mail()) .append("\n\tMail Nickname: ").append(user.mailNickname()) .append("\n\tUser Principal Name: ").append(user.userPrincipalName()); System.out.println(builder.toString()); } /** * Print Active Directory User info. 
* * @param role role definition */ public static void print(RoleDefinition role) { StringBuilder builder = new StringBuilder() .append("Role Definition: ").append(role.id()) .append("\n\tName: ").append(role.name()) .append("\n\tRole Name: ").append(role.roleName()) .append("\n\tType: ").append(role.type()) .append("\n\tDescription: ").append(role.description()) .append("\n\tType: ").append(role.type()); Set<Permission> permissions = role.permissions(); builder.append("\n\tPermissions: ").append(permissions.size()); for (Permission permission : permissions) { builder.append("\n\t\tPermission Actions: " + permission.actions().size()); for (String action : permission.actions()) { builder.append("\n\t\t\tName :").append(action); } builder.append("\n\t\tPermission Not Actions: " + permission.notActions().size()); for (String notAction : permission.notActions()) { builder.append("\n\t\t\tName :").append(notAction); } builder.append("\n\t\tPermission Data Actions: " + permission.dataActions().size()); for (String notAction : permission.dataActions()) { builder.append("\n\t\t\tName :").append(notAction); } } Set<String> assignableScopes = role.assignableScopes(); builder.append("\n\tAssignable scopes: ").append(assignableScopes.size()); for (String scope : assignableScopes) { builder.append("\n\t\tAssignable Scope: ") .append("\n\t\t\tName :").append(scope); } System.out.println(builder.toString()); } /** * Print Role Assignment info. * * @param roleAssignment role assignment */ public static void print(RoleAssignment roleAssignment) { StringBuilder builder = new StringBuilder() .append("Role Assignment: ") .append("\n\tScope: ").append(roleAssignment.scope()) .append("\n\tPrincipal Id: ").append(roleAssignment.principalId()) .append("\n\tRole Definition Id: ").append(roleAssignment.roleDefinitionId()); System.out.println(builder.toString()); } /** * Print Active Directory Group info. 
 *
 * @param group active directory group
 */
public static void print(ActiveDirectoryGroup group) {
    StringBuilder builder = new StringBuilder()
        .append("Active Directory Group: ").append(group.id())
        .append("\n\tName: ").append(group.name())
        .append("\n\tMail: ").append(group.mail())
        .append("\n\tSecurity Enabled: ").append(group.securityEnabled())
        .append("\n\tGroup members:");
    // NOTE(review): listMembers() performs a service call — this printer does network I/O.
    for (ActiveDirectoryObject object : group.listMembers()) {
        builder.append("\n\t\tType: ").append(object.getClass().getSimpleName())
            .append("\tName: ").append(object.name());
    }
    System.out.println(builder.toString());
}

/**
 * Print Active Directory Application info.
 *
 * @param application active directory application
 */
public static void print(ActiveDirectoryApplication application) {
    StringBuilder builder = new StringBuilder()
        .append("Active Directory Application: ").append(application.id())
        .append("\n\tName: ").append(application.name())
        .append("\n\tSign on URL: ").append(application.signOnUrl())
        .append("\n\tReply URLs:");
    for (String replyUrl : application.replyUrls()) {
        builder.append("\n\t\t").append(replyUrl);
    }
    System.out.println(builder.toString());
}

/**
 * Print Service Principal info.
 *
 * @param servicePrincipal service principal
 */
public static void print(ServicePrincipal servicePrincipal) {
    StringBuilder builder = new StringBuilder()
        .append("Service Principal: ").append(servicePrincipal.id())
        .append("\n\tName: ").append(servicePrincipal.name())
        .append("\n\tApplication Id: ").append(servicePrincipal.applicationId());
    List<String> names = servicePrincipal.servicePrincipalNames();
    builder.append("\n\tNames: ").append(names.size());
    for (String name : names) {
        builder.append("\n\t\tName: ").append(name);
    }
    System.out.println(builder.toString());
}

/**
 * Print Network Watcher info.
 *
 * @param nw network watcher
 */
public static void print(NetworkWatcher nw) {
    StringBuilder builder = new StringBuilder()
        .append("Network Watcher: ").append(nw.id())
        .append("\n\tName: ").append(nw.name())
        .append("\n\tResource group name: ").append(nw.resourceGroupName())
        .append("\n\tRegion name: ").append(nw.regionName());
    System.out.println(builder.toString());
}

/**
 * Print packet capture info.
 *
 * @param resource packet capture
 */
public static void print(PacketCapture resource) {
    // Capture settings, storage target, then one stanza per capture filter.
    StringBuilder sb = new StringBuilder().append("Packet Capture: ").append(resource.id())
        .append("\n\tName: ").append(resource.name())
        .append("\n\tTarget id: ").append(resource.targetId())
        .append("\n\tTime limit in seconds: ").append(resource.timeLimitInSeconds())
        .append("\n\tBytes to capture per packet: ").append(resource.bytesToCapturePerPacket())
        .append("\n\tProvisioning state: ").append(resource.provisioningState())
        .append("\n\tStorage location:")
        .append("\n\tStorage account id: ").append(resource.storageLocation().storageId())
        .append("\n\tStorage account path: ").append(resource.storageLocation().storagePath())
        .append("\n\tFile path: ").append(resource.storageLocation().filePath())
        .append("\n\t Packet capture filters: ").append(resource.filters().size());
    for (PacketCaptureFilter filter : resource.filters()) {
        sb.append("\n\t\tProtocol: ").append(filter.protocol());
        sb.append("\n\t\tLocal IP address: ").append(filter.localIpAddress());
        sb.append("\n\t\tRemote IP address: ").append(filter.remoteIpAddress());
        sb.append("\n\t\tLocal port: ").append(filter.localPort());
        sb.append("\n\t\tRemote port: ").append(filter.remotePort());
    }
    System.out.println(sb.toString());
}

/**
 * Print verification IP flow info.
* * @param resource IP flow verification info */ public static void print(VerificationIPFlow resource) { System.out.println(new StringBuilder("IP flow verification: ") .append("\n\tAccess: ").append(resource.access()) .append("\n\tRule name: ").append(resource.ruleName()) .toString()); } /** * Print topology info. * * @param resource topology */ public static void print(Topology resource) { StringBuilder sb = new StringBuilder().append("Topology: ").append(resource.id()) .append("\n\tTopology parameters: ") .append("\n\t\tResource group: ").append(resource.topologyParameters().targetResourceGroupName()) .append("\n\t\tVirtual network: ").append(resource.topologyParameters().targetVirtualNetwork() == null ? "" : resource.topologyParameters().targetVirtualNetwork().id()) .append("\n\t\tSubnet id: ").append(resource.topologyParameters().targetSubnet() == null ? "" : resource.topologyParameters().targetSubnet().id()) .append("\n\tCreated time: ").append(resource.createdTime()) .append("\n\tLast modified time: ").append(resource.lastModifiedTime()); for (TopologyResource tr : resource.resources().values()) { sb.append("\n\tTopology resource: ").append(tr.id()) .append("\n\t\tName: ").append(tr.name()) .append("\n\t\tLocation: ").append(tr.location()) .append("\n\t\tAssociations:"); for (TopologyAssociation association : tr.associations()) { sb.append("\n\t\t\tName:").append(association.name()) .append("\n\t\t\tResource id:").append(association.resourceId()) .append("\n\t\t\tAssociation type:").append(association.associationType()); } } System.out.println(sb.toString()); } /** * Print flow log settings info. 
* * @param resource flow log settings */ public static void print(FlowLogSettings resource) { System.out.println(new StringBuilder().append("Flow log settings: ") .append("Target resource id: ").append(resource.targetResourceId()) .append("\n\tFlow log enabled: ").append(resource.enabled()) .append("\n\tStorage account id: ").append(resource.storageId()) .append("\n\tRetention policy enabled: ").append(resource.isRetentionEnabled()) .append("\n\tRetention policy days: ").append(resource.retentionDays()) .toString()); } /** * Print availability set info. * * @param resource an availability set */ public static void print(SecurityGroupView resource) { StringBuilder sb = new StringBuilder().append("Security group view: ") .append("\n\tVirtual machine id: ").append(resource.vmId()); for (SecurityGroupNetworkInterface sgni : resource.networkInterfaces().values()) { sb.append("\n\tSecurity group network interface:").append(sgni.id()) .append("\n\t\tSecurity group network interface:") .append("\n\t\tEffective security rules:"); for (EffectiveNetworkSecurityRule rule : sgni.securityRuleAssociations().effectiveSecurityRules()) { sb.append("\n\t\t\tName: ").append(rule.name()) .append("\n\t\t\tDirection: ").append(rule.direction()) .append("\n\t\t\tAccess: ").append(rule.access()) .append("\n\t\t\tPriority: ").append(rule.priority()) .append("\n\t\t\tSource address prefix: ").append(rule.sourceAddressPrefix()) .append("\n\t\t\tSource port range: ").append(rule.sourcePortRange()) .append("\n\t\t\tDestination address prefix: ").append(rule.destinationAddressPrefix()) .append("\n\t\t\tDestination port range: ").append(rule.destinationPortRange()) .append("\n\t\t\tProtocol: ").append(rule.protocol()); } sb.append("\n\t\tSubnet:").append(sgni.securityRuleAssociations().subnetAssociation().id()); printSecurityRule(sb, sgni.securityRuleAssociations().subnetAssociation().securityRules()); if (sgni.securityRuleAssociations().networkInterfaceAssociation() != null) { 
sb.append("\n\t\tNetwork interface:").append(sgni.securityRuleAssociations().networkInterfaceAssociation().id()); printSecurityRule(sb, sgni.securityRuleAssociations().networkInterfaceAssociation().securityRules()); } sb.append("\n\t\tDefault security rules:"); printSecurityRule(sb, sgni.securityRuleAssociations().defaultSecurityRules()); } System.out.println(sb.toString()); } private static void printSecurityRule(StringBuilder sb, List<SecurityRuleInner> rules) { for (SecurityRuleInner rule : rules) { sb.append("\n\t\t\tName: ").append(rule.name()) .append("\n\t\t\tDirection: ").append(rule.direction()) .append("\n\t\t\tAccess: ").append(rule.access()) .append("\n\t\t\tPriority: ").append(rule.priority()) .append("\n\t\t\tSource address prefix: ").append(rule.sourceAddressPrefix()) .append("\n\t\t\tSource port range: ").append(rule.sourcePortRange()) .append("\n\t\t\tDestination address prefix: ").append(rule.destinationAddressPrefix()) .append("\n\t\t\tDestination port range: ").append(rule.destinationPortRange()) .append("\n\t\t\tProtocol: ").append(rule.protocol()) .append("\n\t\t\tDescription: ").append(rule.description()) .append("\n\t\t\tProvisioning state: ").append(rule.provisioningState()); } } /** * Print next hop info. * * @param resource an availability set */ public static void print(NextHop resource) { System.out.println(new StringBuilder("Next hop: ") .append("Next hop type: ").append(resource.nextHopType()) .append("\n\tNext hop ip address: ").append(resource.nextHopIpAddress()) .append("\n\tRoute table id: ").append(resource.routeTableId()) .toString()); } /** * Print container group info. 
* * @param resource a container group */ public static void print(ContainerGroup resource) { StringBuilder info = new StringBuilder().append("Container Group: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tOS type: ").append(resource.osType()); if (resource.ipAddress() != null) { info.append("\n\tPublic IP address: ").append(resource.ipAddress()); } if (resource.externalTcpPorts() != null) { info.append("\n\tExternal TCP ports:"); for (int port : resource.externalTcpPorts()) { info.append(" ").append(port); } } if (resource.externalUdpPorts() != null) { info.append("\n\tExternal UDP ports:"); for (int port : resource.externalUdpPorts()) { info.append(" ").append(port); } } if (resource.imageRegistryServers() != null) { info.append("\n\tPrivate Docker image registries:"); for (String server : resource.imageRegistryServers()) { info.append(" ").append(server); } } if (resource.volumes() != null) { info.append("\n\tVolume mapping: "); for (Map.Entry<String, Volume> entry : resource.volumes().entrySet()) { info.append("\n\t\tName: ").append(entry.getKey()).append(" -> ") .append(entry.getValue().azureFile() != null ? 
entry.getValue().azureFile().shareName() : "empty direcory volume"); } } if (resource.containers() != null) { info.append("\n\tContainer instances: "); for (Map.Entry<String, Container> entry : resource.containers().entrySet()) { Container container = entry.getValue(); info.append("\n\t\tName: ").append(entry.getKey()).append(" -> ").append(container.image()); info.append("\n\t\t\tResources: "); info.append(container.resources().requests().cpu()).append("CPUs "); info.append(container.resources().requests().memoryInGB()).append("GB"); info.append("\n\t\t\tPorts:"); for (ContainerPort port : container.ports()) { info.append(" ").append(port.port()); } if (container.volumeMounts() != null) { info.append("\n\t\t\tVolume mounts:"); for (VolumeMount volumeMount : container.volumeMounts()) { info.append(" ").append(volumeMount.name()).append("->").append(volumeMount.mountPath()); } } if (container.command() != null) { info.append("\n\t\t\tStart commands:"); for (String command : container.command()) { info.append("\n\t\t\t\t").append(command); } } if (container.environmentVariables() != null) { info.append("\n\t\t\tENV vars:"); for (EnvironmentVariable envVar : container.environmentVariables()) { info.append("\n\t\t\t\t").append(envVar.name()).append("=").append(envVar.value()); } } } } System.out.println(info.toString()); } /** * Print event hub namespace. 
* * @param resource a virtual machine */ public static void print(EventHubNamespace resource) { StringBuilder info = new StringBuilder(); info.append("Eventhub Namespace: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tAzureInsightMetricId: ").append(resource.azureInsightMetricId()) .append("\n\tIsAutoScale enabled: ").append(resource.isAutoScaleEnabled()) .append("\n\tServiceBus endpoint: ").append(resource.serviceBusEndpoint()) .append("\n\tThroughPut upper limit: ").append(resource.throughputUnitsUpperLimit()) .append("\n\tCurrent ThroughPut: ").append(resource.currentThroughputUnits()) .append("\n\tCreated time: ").append(resource.createdAt()) .append("\n\tUpdated time: ").append(resource.updatedAt()); System.out.println(info.toString()); } /** * Print event hub. * * @param resource event hub */ public static void print(EventHub resource) { StringBuilder info = new StringBuilder(); info.append("Eventhub: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tNamespace resource group: ").append(resource.namespaceResourceGroupName()) .append("\n\tNamespace: ").append(resource.namespaceName()) .append("\n\tIs data capture enabled: ").append(resource.isDataCaptureEnabled()) .append("\n\tPartition ids: ").append(resource.partitionIds()); if (resource.isDataCaptureEnabled()) { info.append("\n\t\t\tData capture window size in MB: ").append(resource.dataCaptureWindowSizeInMB()); info.append("\n\t\t\tData capture window size in seconds: ").append(resource.dataCaptureWindowSizeInSeconds()); if (resource.captureDestination() != null) { info.append("\n\t\t\tData capture storage account: ").append(resource.captureDestination().storageAccountResourceId()); info.append("\n\t\t\tData capture storage container: ").append(resource.captureDestination().blobContainer()); } } System.out.println(info.toString()); } /** * 
Print event hub namespace recovery pairing. * * @param resource event hub namespace disaster recovery pairing */ public static void print(EventHubDisasterRecoveryPairing resource) { StringBuilder info = new StringBuilder(); info.append("DisasterRecoveryPairing: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tPrimary namespace resource group name: ").append(resource.primaryNamespaceResourceGroupName()) .append("\n\tPrimary namespace name: ").append(resource.primaryNamespaceName()) .append("\n\tSecondary namespace: ").append(resource.secondaryNamespaceId()) .append("\n\tNamespace role: ").append(resource.namespaceRole()); System.out.println(info.toString()); } /** * Print event hub namespace recovery pairing auth rules. * * @param resource event hub namespace disaster recovery pairing auth rule */ public static void print(DisasterRecoveryPairingAuthorizationRule resource) { StringBuilder info = new StringBuilder(); info.append("DisasterRecoveryPairing auth rule: ").append(resource.name()); List<String> rightsStr = new ArrayList<>(); for (AccessRights rights : resource.rights()) { rightsStr.add(rights.toString()); } info.append("\n\tRights: ").append(rightsStr); System.out.println(info.toString()); } /** * Print event hub namespace recovery pairing auth rule key. 
* * @param resource event hub namespace disaster recovery pairing auth rule key */ public static void print(DisasterRecoveryPairingAuthorizationKey resource) { StringBuilder info = new StringBuilder(); info.append("DisasterRecoveryPairing auth key: ") .append("\n\t Alias primary connection string: ").append(resource.aliasPrimaryConnectionString()) .append("\n\t Alias secondary connection string: ").append(resource.aliasSecondaryConnectionString()) .append("\n\t Primary key: ").append(resource.primaryKey()) .append("\n\t Secondary key: ").append(resource.secondaryKey()) .append("\n\t Primary connection string: ").append(resource.primaryConnectionString()) .append("\n\t Secondary connection string: ").append(resource.secondaryConnectionString()); System.out.println(info.toString()); } /** * Print event hub consumer group. * * @param resource event hub consumer group */ public static void print(EventHubConsumerGroup resource) { StringBuilder info = new StringBuilder(); info.append("Event hub consumer group: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tNamespace resource group: ").append(resource.namespaceResourceGroupName()) .append("\n\tNamespace: ").append(resource.namespaceName()) .append("\n\tEvent hub name: ").append(resource.eventHubName()) .append("\n\tUser metadata: ").append(resource.userMetadata()); System.out.println(info.toString()); } /** * Print Diagnostic Setting. 
* * @param resource Diagnostic Setting instance */ public static void print(DiagnosticSetting resource) { StringBuilder info = new StringBuilder("Diagnostic Setting: ") .append("\n\tId: ").append(resource.id()) .append("\n\tAssociated resource Id: ").append(resource.resourceId()) .append("\n\tName: ").append(resource.name()) .append("\n\tStorage Account Id: ").append(resource.storageAccountId()) .append("\n\tEventHub Namespace Autorization Rule Id: ").append(resource.eventHubAuthorizationRuleId()) .append("\n\tEventHub name: ").append(resource.eventHubName()) .append("\n\tLog Analytics workspace Id: ").append(resource.workspaceId()); if (resource.logs() != null && !resource.logs().isEmpty()) { info.append("\n\tLog Settings: "); for (LogSettings ls : resource.logs()) { info.append("\n\t\tCategory: ").append(ls.category()); info.append("\n\t\tRetention policy: "); if (ls.retentionPolicy() != null) { info.append(ls.retentionPolicy().days() + " days"); } else { info.append("NONE"); } } } if (resource.metrics() != null && !resource.metrics().isEmpty()) { info.append("\n\tMetric Settings: "); for (MetricSettings ls : resource.metrics()) { info.append("\n\t\tCategory: ").append(ls.category()); info.append("\n\t\tTimegrain: ").append(ls.timeGrain()); info.append("\n\t\tRetention policy: "); if (ls.retentionPolicy() != null) { info.append(ls.retentionPolicy().days() + " days"); } else { info.append("NONE"); } } } System.out.println(info.toString()); } /** * Print Action group settings. 
* * @param actionGroup action group instance */ public static void print(ActionGroup actionGroup) { StringBuilder info = new StringBuilder("Action Group: ") .append("\n\tId: ").append(actionGroup.id()) .append("\n\tName: ").append(actionGroup.name()) .append("\n\tShort Name: ").append(actionGroup.shortName()); if (actionGroup.emailReceivers() != null && !actionGroup.emailReceivers().isEmpty()) { info.append("\n\tEmail receivers: "); for (EmailReceiver er : actionGroup.emailReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tEMail: ").append(er.emailAddress()); info.append("\n\t\tStatus: ").append(er.status()); info.append("\n\t\t==="); } } if (actionGroup.smsReceivers() != null && !actionGroup.smsReceivers().isEmpty()) { info.append("\n\tSMS text message receivers: "); for (SmsReceiver er : actionGroup.smsReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tPhone: ").append(er.countryCode() + er.phoneNumber()); info.append("\n\t\tStatus: ").append(er.status()); info.append("\n\t\t==="); } } if (actionGroup.webhookReceivers() != null && !actionGroup.webhookReceivers().isEmpty()) { info.append("\n\tWebhook receivers: "); for (WebhookReceiver er : actionGroup.webhookReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tURI: ").append(er.serviceUri()); info.append("\n\t\t==="); } } if (actionGroup.pushNotificationReceivers() != null && !actionGroup.pushNotificationReceivers().isEmpty()) { info.append("\n\tApp Push Notification receivers: "); for (AzureAppPushReceiver er : actionGroup.pushNotificationReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tEmail: ").append(er.emailAddress()); info.append("\n\t\t==="); } } if (actionGroup.voiceReceivers() != null && !actionGroup.voiceReceivers().isEmpty()) { info.append("\n\tVoice Message receivers: "); for (VoiceReceiver er : actionGroup.voiceReceivers()) { info.append("\n\t\tName: ").append(er.name()); 
info.append("\n\t\tPhone: ").append(er.countryCode() + er.phoneNumber()); info.append("\n\t\t==="); } } if (actionGroup.automationRunbookReceivers() != null && !actionGroup.automationRunbookReceivers().isEmpty()) { info.append("\n\tAutomation Runbook receivers: "); for (AutomationRunbookReceiver er : actionGroup.automationRunbookReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tRunbook Name: ").append(er.runbookName()); info.append("\n\t\tAccount Id: ").append(er.automationAccountId()); info.append("\n\t\tIs Global: ").append(er.isGlobalRunbook()); info.append("\n\t\tService URI: ").append(er.serviceUri()); info.append("\n\t\tWebhook resource Id: ").append(er.webhookResourceId()); info.append("\n\t\t==="); } } if (actionGroup.azureFunctionReceivers() != null && !actionGroup.azureFunctionReceivers().isEmpty()) { info.append("\n\tAzure Functions receivers: "); for (AzureFunctionReceiver er : actionGroup.azureFunctionReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tFunction Name: ").append(er.functionName()); info.append("\n\t\tFunction App Resource Id: ").append(er.functionAppResourceId()); info.append("\n\t\tFunction Trigger URI: ").append(er.httpTriggerUrl()); info.append("\n\t\t==="); } } if (actionGroup.logicAppReceivers() != null && !actionGroup.logicAppReceivers().isEmpty()) { info.append("\n\tLogic App receivers: "); for (LogicAppReceiver er : actionGroup.logicAppReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tResource Id: ").append(er.resourceId()); info.append("\n\t\tCallback URL: ").append(er.callbackUrl()); info.append("\n\t\t==="); } } if (actionGroup.itsmReceivers() != null && !actionGroup.itsmReceivers().isEmpty()) { info.append("\n\tITSM receivers: "); for (ItsmReceiver er : actionGroup.itsmReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tWorkspace Id: ").append(er.workspaceId()); info.append("\n\t\tConnection Id: 
").append(er.connectionId()); info.append("\n\t\tRegion: ").append(er.region()); info.append("\n\t\tTicket Configuration: ").append(er.ticketConfiguration()); info.append("\n\t\t==="); } } System.out.println(info.toString()); } /** * Print activity log alert settings. * * @param activityLogAlert activity log instance */ public static void print(ActivityLogAlert activityLogAlert) { StringBuilder info = new StringBuilder("Activity Log Alert: ") .append("\n\tId: ").append(activityLogAlert.id()) .append("\n\tName: ").append(activityLogAlert.name()) .append("\n\tDescription: ").append(activityLogAlert.description()) .append("\n\tIs Enabled: ").append(activityLogAlert.enabled()); if (activityLogAlert.scopes() != null && !activityLogAlert.scopes().isEmpty()) { info.append("\n\tScopes: "); for (String er : activityLogAlert.scopes()) { info.append("\n\t\tId: ").append(er); } } if (activityLogAlert.actionGroupIds() != null && !activityLogAlert.actionGroupIds().isEmpty()) { info.append("\n\tAction Groups: "); for (String er : activityLogAlert.actionGroupIds()) { info.append("\n\t\tAction Group Id: ").append(er); } } if (activityLogAlert.equalsConditions() != null && !activityLogAlert.equalsConditions().isEmpty()) { info.append("\n\tAlert conditions (when all of is true): "); for (Map.Entry<String, String> er : activityLogAlert.equalsConditions().entrySet()) { info.append("\n\t\t'").append(er.getKey()).append("' equals '").append(er.getValue()).append("'"); } } System.out.println(info.toString()); } /** * Print metric alert settings. 
* * @param metricAlert metric alert instance */ public static void print(MetricAlert metricAlert) { StringBuilder info = new StringBuilder("Metric Alert: ") .append("\n\tId: ").append(metricAlert.id()) .append("\n\tName: ").append(metricAlert.name()) .append("\n\tDescription: ").append(metricAlert.description()) .append("\n\tIs Enabled: ").append(metricAlert.enabled()) .append("\n\tIs Auto Mitigated: ").append(metricAlert.autoMitigate()) .append("\n\tSeverity: ").append(metricAlert.severity()) .append("\n\tWindow Size: ").append(metricAlert.windowSize()) .append("\n\tEvaluation Frequency: ").append(metricAlert.evaluationFrequency()); if (metricAlert.scopes() != null && !metricAlert.scopes().isEmpty()) { info.append("\n\tScopes: "); for (String er : metricAlert.scopes()) { info.append("\n\t\tId: ").append(er); } } if (metricAlert.actionGroupIds() != null && !metricAlert.actionGroupIds().isEmpty()) { info.append("\n\tAction Groups: "); for (String er : metricAlert.actionGroupIds()) { info.append("\n\t\tAction Group Id: ").append(er); } } if (metricAlert.alertCriterias() != null && !metricAlert.alertCriterias().isEmpty()) { info.append("\n\tAlert conditions (when all of is true): "); for (Map.Entry<String, MetricAlertCondition> er : metricAlert.alertCriterias().entrySet()) { MetricAlertCondition alertCondition = er.getValue(); info.append("\n\t\tCondition name: ").append(er.getKey()) .append("\n\t\tSignal name: ").append(alertCondition.metricName()) .append("\n\t\tMetric Namespace: ").append(alertCondition.metricNamespace()) .append("\n\t\tOperator: ").append(alertCondition.condition()) .append("\n\t\tThreshold: ").append(alertCondition.threshold()) .append("\n\t\tTime Aggregation: ").append(alertCondition.timeAggregation()); if (alertCondition.dimensions() != null && !alertCondition.dimensions().isEmpty()) { for (MetricDimension dimon : alertCondition.dimensions()) { info.append("\n\t\tDimension Filter: ").append("Name [").append(dimon.name()).append("] operator 
[Include] values["); for (String vals : dimon.values()) { info.append(vals).append(", "); } info.append("]"); } } } } System.out.println(info.toString()); } /** * Print spring service settings. * * @param springService spring service instance */ public static void print(SpringService springService) { StringBuilder info = new StringBuilder("Spring Service: ") .append("\n\tId: ").append(springService.id()) .append("\n\tName: ").append(springService.name()) .append("\n\tResource Group: ").append(springService.resourceGroupName()) .append("\n\tRegion: ").append(springService.region()) .append("\n\tTags: ").append(springService.tags()); ConfigServerProperties serverProperties = springService.getServerProperties(); if (serverProperties != null && serverProperties.provisioningState() != null && serverProperties.provisioningState().equals(ConfigServerState.SUCCEEDED) && serverProperties.configServer() != null) { info.append("\n\tProperties: "); if (serverProperties.configServer().gitProperty() != null) { info.append("\n\t\tGit: ").append(serverProperties.configServer().gitProperty().uri()); } } if (springService.sku() != null) { info.append("\n\tSku: ") .append("\n\t\tName: ").append(springService.sku().name()) .append("\n\t\tTier: ").append(springService.sku().tier()) .append("\n\t\tCapacity: ").append(springService.sku().capacity()); } MonitoringSettingProperties monitoringSettingProperties = springService.getMonitoringSetting(); if (monitoringSettingProperties != null && monitoringSettingProperties.provisioningState() != null && monitoringSettingProperties.provisioningState().equals(MonitoringSettingState.SUCCEEDED)) { info.append("\n\tTrace: ") .append("\n\t\tEnabled: ").append(monitoringSettingProperties.traceEnabled()) .append("\n\t\tApp Insight Instrumentation Key: ").append(monitoringSettingProperties.appInsightsInstrumentationKey()); } System.out.println(info.toString()); } /** * Print spring app settings. 
* * @param springApp spring app instance */ public static void print(SpringApp springApp) { StringBuilder info = new StringBuilder("Spring Service: ") .append("\n\tId: ").append(springApp.id()) .append("\n\tName: ").append(springApp.name()) .append("\n\tPublic Endpoint: ").append(springApp.isPublic()) .append("\n\tUrl: ").append(springApp.url()) .append("\n\tHttps Only: ").append(springApp.isHttpsOnly()) .append("\n\tFully Qualified Domain Name: ").append(springApp.fqdn()) .append("\n\tActive Deployment Name: ").append(springApp.activeDeploymentName()); if (springApp.temporaryDisk() != null) { info.append("\n\tTemporary Disk:") .append("\n\t\tSize In GB: ").append(springApp.temporaryDisk().sizeInGB()) .append("\n\t\tMount Path: ").append(springApp.temporaryDisk().mountPath()); } if (springApp.persistentDisk() != null) { info.append("\n\tPersistent Disk:") .append("\n\t\tSize In GB: ").append(springApp.persistentDisk().sizeInGB()) .append("\n\t\tMount Path: ").append(springApp.persistentDisk().mountPath()); } if (springApp.identity() != null) { info.append("\n\tIdentity:") .append("\n\t\tType: ").append(springApp.identity().type()) .append("\n\t\tPrincipal Id: ").append(springApp.identity().principalId()) .append("\n\t\tTenant Id: ").append(springApp.identity().tenantId()); } System.out.println(info.toString()); } /** * Print private link resource. * * @param privateLinkResource the private link resource */ public static void print(PrivateLinkResource privateLinkResource) { StringBuilder info = new StringBuilder("Private Link Resource: ") .append("\n\tGroup ID: ").append(privateLinkResource.groupId()) .append("\n\tRequired Member Names: ").append(privateLinkResource.requiredMemberNames()) .append("\n\tRequired DNS Zone Names: ").append(privateLinkResource.requiredDnsZoneNames()); System.out.println(info); } /** * Print private endpoint. 
* * @param privateEndpoint the private endpoint */ public static void print(PrivateEndpoint privateEndpoint) { StringBuilder info = new StringBuilder("Private Endpoint: ") .append("\n\tId: ").append(privateEndpoint.id()) .append("\n\tName: ").append(privateEndpoint.name()); if (privateEndpoint.privateLinkServiceConnections() != null && !privateEndpoint.privateLinkServiceConnections().isEmpty()) { for (PrivateEndpoint.PrivateLinkServiceConnection connection : privateEndpoint.privateLinkServiceConnections().values()) { info .append("\n\t\tPrivate Link Service Connection Name: ").append(connection.name()) .append("\n\t\tPrivate Link Resource ID: ").append(connection.privateLinkResourceId()) .append("\n\t\tSub Resource Names: ").append(connection.subResourceNames()) .append("\n\t\tProvision Status: ").append(connection.state().status()); } } if (privateEndpoint.privateLinkServiceConnections() != null && !privateEndpoint.privateLinkServiceConnections().isEmpty()) { info.append("\n\tPrivate Link Service Connections:"); for (PrivateEndpoint.PrivateLinkServiceConnection connection : privateEndpoint.privateLinkServiceConnections().values()) { info .append("\n\t\tName: ").append(connection.name()) .append("\n\t\tPrivate Link Resource ID: ").append(connection.privateLinkResourceId()) .append("\n\t\tSub Resource Names: ").append(connection.subResourceNames()) .append("\n\t\tStatus: ").append(connection.state().status()); } } if (privateEndpoint.customDnsConfigurations() != null && !privateEndpoint.customDnsConfigurations().isEmpty()) { info.append("\n\tCustom DNS Configure:"); for (CustomDnsConfigPropertiesFormat customDns : privateEndpoint.customDnsConfigurations()) { info .append("\n\t\tFQDN: ").append(customDns.fqdn()) .append("\n\t\tIP Address: ").append(customDns.ipAddresses()); } } System.out.println(info); } /** * Sends a GET request to target URL. * <p> * Retry logic tuned for AppService. * The method does not handle 301 redirect. * * @param urlString the target URL. 
* @return Content of the HTTP response. */ public static String sendGetRequest(String urlString) { HttpRequest request = new HttpRequest(HttpMethod.GET, urlString); Mono<Response<String>> response = stringResponse(HTTP_PIPELINE.send(request) .flatMap(response1 -> { int code = response1.getStatusCode(); if (code == 200 || code == 400 || code == 404) { return Mono.just(response1); } else { return Mono.error(new HttpResponseException(response1)); } }) .retryWhen(Retry .fixedDelay(5, Duration.ofSeconds(30)) .filter(t -> { boolean retry = false; if (t instanceof TimeoutException) { retry = true; } else if (t instanceof HttpResponseException && ((HttpResponseException) t).getResponse().getStatusCode() == 503) { retry = true; } if (retry) { LOGGER.info("retry GET request to {}", urlString); } return retry; }))); Response<String> ret = response.block(); return ret == null ? null : ret.getValue(); } /** * Sends a POST request to target URL. * <p> * Retry logic tuned for AppService. * * @param urlString the target URL. * @param body the request body. * @return Content of the HTTP response. * */ public static String sendPostRequest(String urlString, String body) { try { HttpRequest request = new HttpRequest(HttpMethod.POST, urlString).setBody(body); Mono<Response<String>> response = stringResponse(HTTP_PIPELINE.send(request) .flatMap(response1 -> { int code = response1.getStatusCode(); if (code == 200 || code == 400 || code == 404) { return Mono.just(response1); } else { return Mono.error(new HttpResponseException(response1)); } }) .retryWhen(Retry .fixedDelay(5, Duration.ofSeconds(30)) .filter(t -> { boolean retry = false; if (t instanceof TimeoutException) { retry = true; } if (retry) { LOGGER.info("retry POST request to {}", urlString); } return retry; }))); Response<String> ret = response.block(); return ret == null ? 
null : ret.getValue(); } catch (Exception e) { LOGGER.logThrowableAsError(e); return null; } } private static Mono<Response<String>> stringResponse(Mono<HttpResponse> responseMono) { return responseMono.flatMap(response -> response.getBodyAsString() .map(str -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), str))); } private static final HttpPipeline HTTP_PIPELINE = new HttpPipelineBuilder() .policies( new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC)), new RetryPolicy("Retry-After", ChronoUnit.SECONDS)) .build(); /** * Get the size of the iterable. * * @param iterable iterable to count size * @param <T> generic type parameter of the iterable * @return size of the iterable */ public static <T> int getSize(Iterable<T> iterable) { int res = 0; Iterator<T> iterator = iterable.iterator(); while (iterator.hasNext()) { iterator.next(); res++; } return res; } }
for (String notAction : permission.dataActions()) {
public static String getSecondaryServicePrincipalClientID(String envSecondaryServicePrincipal) throws IOException {
    // Delegate to the shared reader: JSON files expose the value as "clientId",
    // legacy .properties files as "client".
    return readSecondaryServicePrincipalValue(envSecondaryServicePrincipal, "clientId", "client");
}

/**
 * Retrieve the secondary service principal secret.
 *
 * @param envSecondaryServicePrincipal an Azure Container Registry
 * @return a service principal secret
 * @throws IOException exception
 */
public static String getSecondaryServicePrincipalSecret(String envSecondaryServicePrincipal) throws IOException {
    // JSON files expose the value as "clientSecret", legacy .properties files as "key".
    return readSecondaryServicePrincipalValue(envSecondaryServicePrincipal, "clientSecret", "key");
}

/**
 * Reads a single value from a service principal credentials file.
 * <p>
 * The file may be either a JSON document (detected by a leading '{') or a
 * Java properties file; the two formats use different key names for the
 * same logical value, hence the two key parameters.
 *
 * @param credentialsFile path to the credentials file
 * @param jsonKey key to look up when the file is JSON
 * @param propertyKey key to look up when the file is a properties file
 * @return the value for the key, or {@code null} if absent
 * @throws IOException if the file cannot be read or parsed
 */
private static String readSecondaryServicePrincipalValue(String credentialsFile, String jsonKey, String propertyKey)
        throws IOException {
    String content = new String(Files.readAllBytes(new File(credentialsFile).toPath()),
        StandardCharsets.UTF_8).trim();
    if (content.startsWith("{")) {
        HashMap<String, String> auth = new HashMap<>();
        auth = new JacksonAdapter().deserialize(content, auth.getClass(), SerializerEncoding.JSON);
        return auth.get(jsonKey);
    } else {
        Properties authSettings = new Properties();
        try (FileInputStream credentialsFileStream = new FileInputStream(new File(credentialsFile))) {
            authSettings.load(credentialsFileStream);
        }
        return authSettings.getProperty(propertyKey);
    }
}

/**
 * This method creates a certificate for given password.
* * @param certPath location of certificate file * @param pfxPath location of pfx file * @param alias User alias * @param password alias password * @param cnName domain name * @param dnsName dns name in subject alternate name * @throws IOException IO Exception */ public static void createCertificate(String certPath, String pfxPath, String alias, String password, String cnName, String dnsName) throws IOException { if (new File(pfxPath).exists()) { return; } String validityInDays = "3650"; String keyAlg = "RSA"; String sigAlg = "SHA1withRSA"; String keySize = "2048"; String storeType = "pkcs12"; String command = "keytool"; String jdkPath = System.getProperty("java.home"); if (jdkPath != null && !jdkPath.isEmpty()) { jdkPath = jdkPath.concat("\\bin"); if (new File(jdkPath).isDirectory()) { command = String.format("%s%s%s", jdkPath, File.separator, command); } } else { return; } String[] commandArgs = {command, "-genkey", "-alias", alias, "-keystore", pfxPath, "-storepass", password, "-validity", validityInDays, "-keyalg", keyAlg, "-sigalg", sigAlg, "-keysize", keySize, "-storetype", storeType, "-dname", "CN=" + cnName, "-ext", "EKU=1.3.6.1.5.5.7.3.1"}; if (dnsName != null) { List<String> args = new ArrayList<>(Arrays.asList(commandArgs)); args.add("-ext"); args.add("san=dns:" + dnsName); commandArgs = args.toArray(new String[0]); } Utils.cmdInvocation(commandArgs, true); File pfxFile = new File(pfxPath); if (pfxFile.exists()) { String[] certCommandArgs = {command, "-export", "-alias", alias, "-storetype", storeType, "-keystore", pfxPath, "-storepass", password, "-rfc", "-file", certPath}; Utils.cmdInvocation(certCommandArgs, true); File cerFile = new File(pfxPath); if (!cerFile.exists()) { throw new IOException( "Error occurred while creating certificate" + String.join(" ", certCommandArgs)); } } else { throw new IOException("Error occurred while creating certificates" + String.join(" ", commandArgs)); } } /** * This method is used for invoking native commands. 
* * @param command :- command to invoke. * @param ignoreErrorStream : Boolean which controls whether to throw exception or not * based on error stream. * @return result :- depending on the method invocation. * @throws IOException exceptions thrown from the execution */ public static String cmdInvocation(String[] command, boolean ignoreErrorStream) throws IOException { String result = ""; String error = ""; Process process = new ProcessBuilder(command).start(); try ( InputStream inputStream = process.getInputStream(); InputStream errorStream = process.getErrorStream(); BufferedReader br = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8)); BufferedReader ebr = new BufferedReader(new InputStreamReader(errorStream, StandardCharsets.UTF_8)); ) { result = br.readLine(); process.waitFor(); error = ebr.readLine(); if (error != null && (!"".equals(error))) { if (!ignoreErrorStream) { throw new IOException(error, null); } } } catch (Exception e) { throw new RuntimeException("Exception occurred while invoking command", e); } return result; } /** * Prints information for passed SQL Server. * * @param sqlServer sqlServer to be printed */ public static void print(SqlServer sqlServer) { StringBuilder builder = new StringBuilder().append("Sql Server: ").append(sqlServer.id()) .append("Name: ").append(sqlServer.name()) .append("\n\tResource group: ").append(sqlServer.resourceGroupName()) .append("\n\tRegion: ").append(sqlServer.region()) .append("\n\tSqlServer version: ").append(sqlServer.version()) .append("\n\tFully qualified name for Sql Server: ").append(sqlServer.fullyQualifiedDomainName()); System.out.println(builder.toString()); } /** * Prints information for the passed SQL Database. 
* * @param database database to be printed */ public static void print(SqlDatabase database) { StringBuilder builder = new StringBuilder().append("Sql Database: ").append(database.id()) .append("Name: ").append(database.name()) .append("\n\tResource group: ").append(database.resourceGroupName()) .append("\n\tRegion: ").append(database.region()) .append("\n\tSqlServer Name: ").append(database.sqlServerName()) .append("\n\tEdition of SQL database: ").append(database.edition()) .append("\n\tCollation of SQL database: ").append(database.collation()) .append("\n\tCreation date of SQL database: ").append(database.creationDate()) .append("\n\tIs data warehouse: ").append(database.isDataWarehouse()) .append("\n\tRequested service objective of SQL database: ").append(database.requestedServiceObjectiveName()) .append("\n\tName of current service objective of SQL database: ").append(database.currentServiceObjectiveName()) .append("\n\tMax size bytes of SQL database: ").append(database.maxSizeBytes()) .append("\n\tDefault secondary location of SQL database: ").append(database.defaultSecondaryLocation()); System.out.println(builder.toString()); } /** * Prints information for the passed firewall rule. * * @param firewallRule firewall rule to be printed. */ public static void print(SqlFirewallRule firewallRule) { StringBuilder builder = new StringBuilder().append("Sql firewall rule: ").append(firewallRule.id()) .append("Name: ").append(firewallRule.name()) .append("\n\tResource group: ").append(firewallRule.resourceGroupName()) .append("\n\tRegion: ").append(firewallRule.region()) .append("\n\tSqlServer Name: ").append(firewallRule.sqlServerName()) .append("\n\tStart IP Address of the firewall rule: ").append(firewallRule.startIpAddress()) .append("\n\tEnd IP Address of the firewall rule: ").append(firewallRule.endIpAddress()); System.out.println(builder.toString()); } /** * Prints information for the passed virtual network rule. 
* * @param virtualNetworkRule virtual network rule to be printed. */ public static void print(SqlVirtualNetworkRule virtualNetworkRule) { StringBuilder builder = new StringBuilder().append("SQL virtual network rule: ").append(virtualNetworkRule.id()) .append("Name: ").append(virtualNetworkRule.name()) .append("\n\tResource group: ").append(virtualNetworkRule.resourceGroupName()) .append("\n\tSqlServer Name: ").append(virtualNetworkRule.sqlServerName()) .append("\n\tSubnet ID: ").append(virtualNetworkRule.subnetId()) .append("\n\tState: ").append(virtualNetworkRule.state()); System.out.println(builder.toString()); } /** * Prints information for the passed SQL subscription usage metric. * * @param subscriptionUsageMetric metric to be printed. */ public static void print(SqlSubscriptionUsageMetric subscriptionUsageMetric) { StringBuilder builder = new StringBuilder().append("SQL Subscription Usage Metric: ").append(subscriptionUsageMetric.id()) .append("Name: ").append(subscriptionUsageMetric.name()) .append("\n\tDisplay Name: ").append(subscriptionUsageMetric.displayName()) .append("\n\tCurrent Value: ").append(subscriptionUsageMetric.currentValue()) .append("\n\tLimit: ").append(subscriptionUsageMetric.limit()) .append("\n\tUnit: ").append(subscriptionUsageMetric.unit()) .append("\n\tType: ").append(subscriptionUsageMetric.type()); System.out.println(builder.toString()); } /** * Prints information for the passed SQL database usage metric. * * @param dbUsageMetric metric to be printed. 
*/ public static void print(SqlDatabaseUsageMetric dbUsageMetric) { StringBuilder builder = new StringBuilder().append("SQL Database Usage Metric") .append("Name: ").append(dbUsageMetric.name()) .append("\n\tDisplay Name: ").append(dbUsageMetric.displayName()) .append("\n\tCurrent Value: ").append(dbUsageMetric.currentValue()) .append("\n\tLimit: ").append(dbUsageMetric.limit()) .append("\n\tUnit: ").append(dbUsageMetric.unit()); System.out.println(builder.toString()); } /** * Prints information for the passed Failover Group. * * @param failoverGroup the SQL Failover Group to be printed. */ public static void print(SqlFailoverGroup failoverGroup) { StringBuilder builder = new StringBuilder().append("SQL Failover Group: ").append(failoverGroup.id()) .append("Name: ").append(failoverGroup.name()) .append("\n\tResource group: ").append(failoverGroup.resourceGroupName()) .append("\n\tSqlServer Name: ").append(failoverGroup.sqlServerName()) .append("\n\tRead-write endpoint policy: ").append(failoverGroup.readWriteEndpointPolicy()) .append("\n\tData loss grace period: ").append(failoverGroup.readWriteEndpointDataLossGracePeriodMinutes()) .append("\n\tRead-only endpoint policy: ").append(failoverGroup.readOnlyEndpointPolicy()) .append("\n\tReplication state: ").append(failoverGroup.replicationState()) .append("\n\tReplication role: ").append(failoverGroup.replicationRole()); builder.append("\n\tPartner Servers: "); for (PartnerInfo item : failoverGroup.partnerServers()) { builder .append("\n\t\tId: ").append(item.id()) .append("\n\t\tLocation: ").append(item.location()) .append("\n\t\tReplication role: ").append(item.replicationRole()); } builder.append("\n\tDatabases: "); for (String databaseId : failoverGroup.databases()) { builder.append("\n\t\tID: ").append(databaseId); } System.out.println(builder.toString()); } /** * Prints information for the passed SQL server key. * * @param serverKey virtual network rule to be printed. 
*/ public static void print(SqlServerKey serverKey) { StringBuilder builder = new StringBuilder().append("SQL server key: ").append(serverKey.id()) .append("Name: ").append(serverKey.name()) .append("\n\tResource group: ").append(serverKey.resourceGroupName()) .append("\n\tSqlServer Name: ").append(serverKey.sqlServerName()) .append("\n\tRegion: ").append(serverKey.region() != null ? serverKey.region().name() : "") .append("\n\tServer Key Type: ").append(serverKey.serverKeyType()) .append("\n\tServer Key URI: ").append(serverKey.uri()) .append("\n\tServer Key Thumbprint: ").append(serverKey.thumbprint()) .append("\n\tServer Key Creation Date: ").append(serverKey.creationDate() != null ? serverKey.creationDate().toString() : ""); System.out.println(builder.toString()); } /** * Prints information of the elastic pool passed in. * * @param elasticPool elastic pool to be printed */ public static void print(SqlElasticPool elasticPool) { StringBuilder builder = new StringBuilder().append("Sql elastic pool: ").append(elasticPool.id()) .append("Name: ").append(elasticPool.name()) .append("\n\tResource group: ").append(elasticPool.resourceGroupName()) .append("\n\tRegion: ").append(elasticPool.region()) .append("\n\tSqlServer Name: ").append(elasticPool.sqlServerName()) .append("\n\tEdition of elastic pool: ").append(elasticPool.edition()) .append("\n\tTotal number of DTUs in the elastic pool: ").append(elasticPool.dtu()) .append("\n\tMaximum DTUs a database can get in elastic pool: ").append(elasticPool.databaseDtuMax()) .append("\n\tMinimum DTUs a database is guaranteed in elastic pool: ").append(elasticPool.databaseDtuMin()) .append("\n\tCreation date for the elastic pool: ").append(elasticPool.creationDate()) .append("\n\tState of the elastic pool: ").append(elasticPool.state()) .append("\n\tStorage capacity in MBs for the elastic pool: ").append(elasticPool.storageCapacity()); System.out.println(builder.toString()); } /** * Prints information of the elastic pool 
activity. * * @param elasticPoolActivity elastic pool activity to be printed */ public static void print(ElasticPoolActivity elasticPoolActivity) { StringBuilder builder = new StringBuilder().append("Sql elastic pool activity: ").append(elasticPoolActivity.id()) .append("Name: ").append(elasticPoolActivity.name()) .append("\n\tResource group: ").append(elasticPoolActivity.resourceGroupName()) .append("\n\tState: ").append(elasticPoolActivity.state()) .append("\n\tElastic pool name: ").append(elasticPoolActivity.elasticPoolName()) .append("\n\tStart time of activity: ").append(elasticPoolActivity.startTime()) .append("\n\tEnd time of activity: ").append(elasticPoolActivity.endTime()) .append("\n\tError code of activity: ").append(elasticPoolActivity.errorCode()) .append("\n\tError message of activity: ").append(elasticPoolActivity.errorMessage()) .append("\n\tError severity of activity: ").append(elasticPoolActivity.errorSeverity()) .append("\n\tOperation: ").append(elasticPoolActivity.operation()) .append("\n\tCompleted percentage of activity: ").append(elasticPoolActivity.percentComplete()); System.out.println(builder.toString()); } /** * Print an application gateway. * * @param resource an application gateway */ public static void print(ApplicationGateway resource) { StringBuilder info = new StringBuilder(); info.append("Application gateway: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tSKU: ").append(resource.sku().toString()) .append("\n\tOperational state: ").append(resource.operationalState()) .append("\n\tInternet-facing? ").append(resource.isPublic()) .append("\n\tInternal? 
").append(resource.isPrivate()) .append("\n\tDefault private IP address: ").append(resource.privateIpAddress()) .append("\n\tPrivate IP address allocation method: ").append(resource.privateIpAllocationMethod()) .append("\n\tDisabled SSL protocols: ").append(resource.disabledSslProtocols().toString()); Map<String, ApplicationGatewayIpConfiguration> ipConfigs = resource.ipConfigurations(); info.append("\n\tIP configurations: ").append(ipConfigs.size()); for (ApplicationGatewayIpConfiguration ipConfig : ipConfigs.values()) { info.append("\n\t\tName: ").append(ipConfig.name()) .append("\n\t\t\tNetwork id: ").append(ipConfig.networkId()) .append("\n\t\t\tSubnet name: ").append(ipConfig.subnetName()); } Map<String, ApplicationGatewayFrontend> frontends = resource.frontends(); info.append("\n\tFrontends: ").append(frontends.size()); for (ApplicationGatewayFrontend frontend : frontends.values()) { info.append("\n\t\tName: ").append(frontend.name()) .append("\n\t\t\tPublic? ").append(frontend.isPublic()); if (frontend.isPublic()) { info.append("\n\t\t\tPublic IP address ID: ").append(frontend.publicIpAddressId()); } if (frontend.isPrivate()) { info.append("\n\t\t\tPrivate IP address: ").append(frontend.privateIpAddress()) .append("\n\t\t\tPrivate IP allocation method: ").append(frontend.privateIpAllocationMethod()) .append("\n\t\t\tSubnet name: ").append(frontend.subnetName()) .append("\n\t\t\tVirtual network ID: ").append(frontend.networkId()); } } Map<String, ApplicationGatewayBackend> backends = resource.backends(); info.append("\n\tBackends: ").append(backends.size()); for (ApplicationGatewayBackend backend : backends.values()) { info.append("\n\t\tName: ").append(backend.name()) .append("\n\t\t\tAssociated NIC IP configuration IDs: ").append(backend.backendNicIPConfigurationNames().keySet()); Collection<ApplicationGatewayBackendAddress> addresses = backend.addresses(); info.append("\n\t\t\tAddresses: ").append(addresses.size()); for (ApplicationGatewayBackendAddress 
address : addresses) { info.append("\n\t\t\t\tFQDN: ").append(address.fqdn()) .append("\n\t\t\t\tIP: ").append(address.ipAddress()); } } Map<String, ApplicationGatewayBackendHttpConfiguration> httpConfigs = resource.backendHttpConfigurations(); info.append("\n\tHTTP Configurations: ").append(httpConfigs.size()); for (ApplicationGatewayBackendHttpConfiguration httpConfig : httpConfigs.values()) { info.append("\n\t\tName: ").append(httpConfig.name()) .append("\n\t\t\tCookie based affinity: ").append(httpConfig.cookieBasedAffinity()) .append("\n\t\t\tPort: ").append(httpConfig.port()) .append("\n\t\t\tRequest timeout in seconds: ").append(httpConfig.requestTimeout()) .append("\n\t\t\tProtocol: ").append(httpConfig.protocol()) .append("\n\t\tHost header: ").append(httpConfig.hostHeader()) .append("\n\t\tHost header comes from backend? ").append(httpConfig.isHostHeaderFromBackend()) .append("\n\t\tConnection draining timeout in seconds: ").append(httpConfig.connectionDrainingTimeoutInSeconds()) .append("\n\t\tAffinity cookie name: ").append(httpConfig.affinityCookieName()) .append("\n\t\tPath: ").append(httpConfig.path()); ApplicationGatewayProbe probe = httpConfig.probe(); if (probe != null) { info.append("\n\t\tProbe: " + probe.name()); } info.append("\n\t\tIs probe enabled? 
").append(httpConfig.isProbeEnabled()); } Map<String, ApplicationGatewaySslCertificate> sslCerts = resource.sslCertificates(); info.append("\n\tSSL certificates: ").append(sslCerts.size()); for (ApplicationGatewaySslCertificate cert : sslCerts.values()) { info.append("\n\t\tName: ").append(cert.name()) .append("\n\t\t\tCert data: ").append(cert.publicData()); } Map<String, ApplicationGatewayRedirectConfiguration> redirects = resource.redirectConfigurations(); info.append("\n\tRedirect configurations: ").append(redirects.size()); for (ApplicationGatewayRedirectConfiguration redirect : redirects.values()) { info.append("\n\t\tName: ").append(redirect.name()) .append("\n\t\tTarget URL: ").append(redirect.type()) .append("\n\t\tTarget URL: ").append(redirect.targetUrl()) .append("\n\t\tTarget listener: ").append(redirect.targetListener() != null ? redirect.targetListener().name() : null) .append("\n\t\tIs path included? ").append(redirect.isPathIncluded()) .append("\n\t\tIs query string included? ").append(redirect.isQueryStringIncluded()) .append("\n\t\tReferencing request routing rules: ").append(redirect.requestRoutingRules().values()); } Map<String, ApplicationGatewayListener> listeners = resource.listeners(); info.append("\n\tHTTP listeners: ").append(listeners.size()); for (ApplicationGatewayListener listener : listeners.values()) { info.append("\n\t\tName: ").append(listener.name()) .append("\n\t\t\tHost name: ").append(listener.hostname()) .append("\n\t\t\tServer name indication required? 
").append(listener.requiresServerNameIndication()) .append("\n\t\t\tAssociated frontend name: ").append(listener.frontend().name()) .append("\n\t\t\tFrontend port name: ").append(listener.frontendPortName()) .append("\n\t\t\tFrontend port number: ").append(listener.frontendPortNumber()) .append("\n\t\t\tProtocol: ").append(listener.protocol().toString()); if (listener.sslCertificate() != null) { info.append("\n\t\t\tAssociated SSL certificate: ").append(listener.sslCertificate().name()); } } Map<String, ApplicationGatewayProbe> probes = resource.probes(); info.append("\n\tProbes: ").append(probes.size()); for (ApplicationGatewayProbe probe : probes.values()) { info.append("\n\t\tName: ").append(probe.name()) .append("\n\t\tProtocol:").append(probe.protocol().toString()) .append("\n\t\tInterval in seconds: ").append(probe.timeBetweenProbesInSeconds()) .append("\n\t\tRetries: ").append(probe.retriesBeforeUnhealthy()) .append("\n\t\tTimeout: ").append(probe.timeoutInSeconds()) .append("\n\t\tHost: ").append(probe.host()) .append("\n\t\tHealthy HTTP response status code ranges: ").append(probe.healthyHttpResponseStatusCodeRanges()) .append("\n\t\tHealthy HTTP response body contents: ").append(probe.healthyHttpResponseBodyContents()); } Map<String, ApplicationGatewayRequestRoutingRule> rules = resource.requestRoutingRules(); info.append("\n\tRequest routing rules: ").append(rules.size()); for (ApplicationGatewayRequestRoutingRule rule : rules.values()) { info.append("\n\t\tName: ").append(rule.name()) .append("\n\t\tType: ").append(rule.ruleType()) .append("\n\t\tPublic IP address ID: ").append(rule.publicIpAddressId()) .append("\n\t\tHost name: ").append(rule.hostname()) .append("\n\t\tServer name indication required? 
").append(rule.requiresServerNameIndication()) .append("\n\t\tFrontend port: ").append(rule.frontendPort()) .append("\n\t\tFrontend protocol: ").append(rule.frontendProtocol().toString()) .append("\n\t\tBackend port: ").append(rule.backendPort()) .append("\n\t\tCookie based affinity enabled? ").append(rule.cookieBasedAffinity()) .append("\n\t\tRedirect configuration: ").append(rule.redirectConfiguration() != null ? rule.redirectConfiguration().name() : "(none)"); Collection<ApplicationGatewayBackendAddress> addresses = rule.backendAddresses(); info.append("\n\t\t\tBackend addresses: ").append(addresses.size()); for (ApplicationGatewayBackendAddress address : addresses) { info.append("\n\t\t\t\t") .append(address.fqdn()) .append(" [").append(address.ipAddress()).append("]"); } info.append("\n\t\t\tSSL certificate name: "); ApplicationGatewaySslCertificate cert = rule.sslCertificate(); if (cert == null) { info.append("(None)"); } else { info.append(cert.name()); } info.append("\n\t\t\tAssociated backend address pool: "); ApplicationGatewayBackend backend = rule.backend(); if (backend == null) { info.append("(None)"); } else { info.append(backend.name()); } info.append("\n\t\t\tAssociated backend HTTP settings configuration: "); ApplicationGatewayBackendHttpConfiguration config = rule.backendHttpConfiguration(); if (config == null) { info.append("(None)"); } else { info.append(config.name()); } info.append("\n\t\t\tAssociated frontend listener: "); ApplicationGatewayListener listener = rule.listener(); if (listener == null) { info.append("(None)"); } else { info.append(config.name()); } } System.out.println(info.toString()); } /** * Prints information of a virtual machine custom image. 
* * @param image the image */ public static void print(VirtualMachineCustomImage image) { StringBuilder builder = new StringBuilder().append("Virtual machine custom image: ").append(image.id()) .append("Name: ").append(image.name()) .append("\n\tResource group: ").append(image.resourceGroupName()) .append("\n\tCreated from virtual machine: ").append(image.sourceVirtualMachineId()); builder.append("\n\tOS disk image: ") .append("\n\t\tOperating system: ").append(image.osDiskImage().osType()) .append("\n\t\tOperating system state: ").append(image.osDiskImage().osState()) .append("\n\t\tCaching: ").append(image.osDiskImage().caching()) .append("\n\t\tSize (GB): ").append(image.osDiskImage().diskSizeGB()); if (image.isCreatedFromVirtualMachine()) { builder.append("\n\t\tSource virtual machine: ").append(image.sourceVirtualMachineId()); } if (image.osDiskImage().managedDisk() != null) { builder.append("\n\t\tSource managed disk: ").append(image.osDiskImage().managedDisk().id()); } if (image.osDiskImage().snapshot() != null) { builder.append("\n\t\tSource snapshot: ").append(image.osDiskImage().snapshot().id()); } if (image.osDiskImage().blobUri() != null) { builder.append("\n\t\tSource un-managed vhd: ").append(image.osDiskImage().blobUri()); } if (image.dataDiskImages() != null) { for (ImageDataDisk diskImage : image.dataDiskImages().values()) { builder.append("\n\tDisk Image (Lun) .append("\n\t\tCaching: ").append(diskImage.caching()) .append("\n\t\tSize (GB): ").append(diskImage.diskSizeGB()); if (image.isCreatedFromVirtualMachine()) { builder.append("\n\t\tSource virtual machine: ").append(image.sourceVirtualMachineId()); } if (diskImage.managedDisk() != null) { builder.append("\n\t\tSource managed disk: ").append(diskImage.managedDisk().id()); } if (diskImage.snapshot() != null) { builder.append("\n\t\tSource snapshot: ").append(diskImage.snapshot().id()); } if (diskImage.blobUri() != null) { builder.append("\n\t\tSource un-managed vhd: 
").append(diskImage.blobUri()); } } } System.out.println(builder.toString()); } /** * Uploads a file to an Azure app service for Web App. * * @param profile the publishing profile for the app service. * @param fileName the name of the file on server * @param file the local file */ public static void uploadFileViaFtp(PublishingProfile profile, String fileName, InputStream file) { String path = "./site/wwwroot/webapps"; uploadFileViaFtp(profile, fileName, file, path); } /** * Uploads a file to an Azure app service for Function App. * * @param profile the publishing profile for the app service. * @param fileName the name of the file on server * @param file the local file */ public static void uploadFileForFunctionViaFtp(PublishingProfile profile, String fileName, InputStream file) { String path = "./site/wwwroot"; uploadFileViaFtp(profile, fileName, file, path); } private static void uploadFileViaFtp(PublishingProfile profile, String fileName, InputStream file, String path) { FTPClient ftpClient = new FTPClient(); String[] ftpUrlSegments = profile.ftpUrl().split("/", 2); String server = ftpUrlSegments[0]; if (fileName.contains("/")) { int lastslash = fileName.lastIndexOf('/'); path = path + "/" + fileName.substring(0, lastslash); fileName = fileName.substring(lastslash + 1); } try { ftpClient.connect(server); ftpClient.enterLocalPassiveMode(); ftpClient.login(profile.ftpUsername(), profile.ftpPassword()); ftpClient.setFileType(FTP.BINARY_FILE_TYPE); for (String segment : path.split("/")) { if (!ftpClient.changeWorkingDirectory(segment)) { ftpClient.makeDirectory(segment); ftpClient.changeWorkingDirectory(segment); } } ftpClient.storeFile(fileName, file); ftpClient.disconnect(); } catch (IOException e) { e.printStackTrace(); } } /** * Print service bus namespace info. 
* * @param serviceBusNamespace a service bus namespace */ public static void print(ServiceBusNamespace serviceBusNamespace) { StringBuilder builder = new StringBuilder() .append("Service bus Namespace: ").append(serviceBusNamespace.id()) .append("\n\tName: ").append(serviceBusNamespace.name()) .append("\n\tRegion: ").append(serviceBusNamespace.regionName()) .append("\n\tResourceGroupName: ").append(serviceBusNamespace.resourceGroupName()) .append("\n\tCreatedAt: ").append(serviceBusNamespace.createdAt()) .append("\n\tUpdatedAt: ").append(serviceBusNamespace.updatedAt()) .append("\n\tDnsLabel: ").append(serviceBusNamespace.dnsLabel()) .append("\n\tFQDN: ").append(serviceBusNamespace.fqdn()) .append("\n\tSku: ") .append("\n\t\tCapacity: ").append(serviceBusNamespace.sku().capacity()) .append("\n\t\tSkuName: ").append(serviceBusNamespace.sku().name()) .append("\n\t\tTier: ").append(serviceBusNamespace.sku().tier()); System.out.println(builder.toString()); } /** * Print service bus queue info. 
* * @param queue a service bus queue */ public static void print(Queue queue) { StringBuilder builder = new StringBuilder() .append("Service bus Queue: ").append(queue.id()) .append("\n\tName: ").append(queue.name()) .append("\n\tResourceGroupName: ").append(queue.resourceGroupName()) .append("\n\tCreatedAt: ").append(queue.createdAt()) .append("\n\tUpdatedAt: ").append(queue.updatedAt()) .append("\n\tAccessedAt: ").append(queue.accessedAt()) .append("\n\tActiveMessageCount: ").append(queue.activeMessageCount()) .append("\n\tCurrentSizeInBytes: ").append(queue.currentSizeInBytes()) .append("\n\tDeadLetterMessageCount: ").append(queue.deadLetterMessageCount()) .append("\n\tDefaultMessageTtlDuration: ").append(queue.defaultMessageTtlDuration()) .append("\n\tDuplicateMessageDetectionHistoryDuration: ").append(queue.duplicateMessageDetectionHistoryDuration()) .append("\n\tIsBatchedOperationsEnabled: ").append(queue.isBatchedOperationsEnabled()) .append("\n\tIsDeadLetteringEnabledForExpiredMessages: ").append(queue.isDeadLetteringEnabledForExpiredMessages()) .append("\n\tIsDuplicateDetectionEnabled: ").append(queue.isDuplicateDetectionEnabled()) .append("\n\tIsExpressEnabled: ").append(queue.isExpressEnabled()) .append("\n\tIsPartitioningEnabled: ").append(queue.isPartitioningEnabled()) .append("\n\tIsSessionEnabled: ").append(queue.isSessionEnabled()) .append("\n\tDeleteOnIdleDurationInMinutes: ").append(queue.deleteOnIdleDurationInMinutes()) .append("\n\tMaxDeliveryCountBeforeDeadLetteringMessage: ").append(queue.maxDeliveryCountBeforeDeadLetteringMessage()) .append("\n\tMaxSizeInMB: ").append(queue.maxSizeInMB()) .append("\n\tMessageCount: ").append(queue.messageCount()) .append("\n\tScheduledMessageCount: ").append(queue.scheduledMessageCount()) .append("\n\tStatus: ").append(queue.status()) .append("\n\tTransferMessageCount: ").append(queue.transferMessageCount()) .append("\n\tLockDurationInSeconds: ").append(queue.lockDurationInSeconds()) 
.append("\n\tTransferDeadLetterMessageCount: ").append(queue.transferDeadLetterMessageCount()); System.out.println(builder.toString()); } /** * Print service bus queue authorization keys info. * * @param queueAuthorizationRule a service bus queue authorization keys */ public static void print(QueueAuthorizationRule queueAuthorizationRule) { StringBuilder builder = new StringBuilder() .append("Service bus queue authorization rule: ").append(queueAuthorizationRule.id()) .append("\n\tName: ").append(queueAuthorizationRule.name()) .append("\n\tResourceGroupName: ").append(queueAuthorizationRule.resourceGroupName()) .append("\n\tNamespace Name: ").append(queueAuthorizationRule.namespaceName()) .append("\n\tQueue Name: ").append(queueAuthorizationRule.queueName()); List<com.azure.resourcemanager.servicebus.models.AccessRights> rights = queueAuthorizationRule.rights(); builder.append("\n\tNumber of access rights in queue: ").append(rights.size()); for (com.azure.resourcemanager.servicebus.models.AccessRights right : rights) { builder.append("\n\t\tAccessRight: ") .append("\n\t\t\tName :").append(right.name()); } System.out.println(builder.toString()); } /** * Print service bus namespace authorization keys info. * * @param keys a service bus namespace authorization keys */ public static void print(AuthorizationKeys keys) { StringBuilder builder = new StringBuilder() .append("Authorization keys: ") .append("\n\tPrimaryKey: ").append(keys.primaryKey()) .append("\n\tPrimaryConnectionString: ").append(keys.primaryConnectionString()) .append("\n\tSecondaryKey: ").append(keys.secondaryKey()) .append("\n\tSecondaryConnectionString: ").append(keys.secondaryConnectionString()); System.out.println(builder.toString()); } /** * Print service bus namespace authorization rule info. 
* * @param namespaceAuthorizationRule a service bus namespace authorization rule */ public static void print(NamespaceAuthorizationRule namespaceAuthorizationRule) { StringBuilder builder = new StringBuilder() .append("Service bus queue authorization rule: ").append(namespaceAuthorizationRule.id()) .append("\n\tName: ").append(namespaceAuthorizationRule.name()) .append("\n\tResourceGroupName: ").append(namespaceAuthorizationRule.resourceGroupName()) .append("\n\tNamespace Name: ").append(namespaceAuthorizationRule.namespaceName()); List<com.azure.resourcemanager.servicebus.models.AccessRights> rights = namespaceAuthorizationRule.rights(); builder.append("\n\tNumber of access rights in queue: ").append(rights.size()); for (com.azure.resourcemanager.servicebus.models.AccessRights right : rights) { builder.append("\n\t\tAccessRight: ") .append("\n\t\t\tName :").append(right.name()); } System.out.println(builder.toString()); } /** * Print service bus topic info. * * @param topic a service bus topic */ public static void print(Topic topic) { StringBuilder builder = new StringBuilder() .append("Service bus topic: ").append(topic.id()) .append("\n\tName: ").append(topic.name()) .append("\n\tResourceGroupName: ").append(topic.resourceGroupName()) .append("\n\tCreatedAt: ").append(topic.createdAt()) .append("\n\tUpdatedAt: ").append(topic.updatedAt()) .append("\n\tAccessedAt: ").append(topic.accessedAt()) .append("\n\tActiveMessageCount: ").append(topic.activeMessageCount()) .append("\n\tCurrentSizeInBytes: ").append(topic.currentSizeInBytes()) .append("\n\tDeadLetterMessageCount: ").append(topic.deadLetterMessageCount()) .append("\n\tDefaultMessageTtlDuration: ").append(topic.defaultMessageTtlDuration()) .append("\n\tDuplicateMessageDetectionHistoryDuration: ").append(topic.duplicateMessageDetectionHistoryDuration()) .append("\n\tIsBatchedOperationsEnabled: ").append(topic.isBatchedOperationsEnabled()) .append("\n\tIsDuplicateDetectionEnabled: 
").append(topic.isDuplicateDetectionEnabled()) .append("\n\tIsExpressEnabled: ").append(topic.isExpressEnabled()) .append("\n\tIsPartitioningEnabled: ").append(topic.isPartitioningEnabled()) .append("\n\tDeleteOnIdleDurationInMinutes: ").append(topic.deleteOnIdleDurationInMinutes()) .append("\n\tMaxSizeInMB: ").append(topic.maxSizeInMB()) .append("\n\tScheduledMessageCount: ").append(topic.scheduledMessageCount()) .append("\n\tStatus: ").append(topic.status()) .append("\n\tTransferMessageCount: ").append(topic.transferMessageCount()) .append("\n\tSubscriptionCount: ").append(topic.subscriptionCount()) .append("\n\tTransferDeadLetterMessageCount: ").append(topic.transferDeadLetterMessageCount()); System.out.println(builder.toString()); } /** * Print service bus subscription info. * * @param serviceBusSubscription a service bus subscription */ public static void print(ServiceBusSubscription serviceBusSubscription) { StringBuilder builder = new StringBuilder() .append("Service bus subscription: ").append(serviceBusSubscription.id()) .append("\n\tName: ").append(serviceBusSubscription.name()) .append("\n\tResourceGroupName: ").append(serviceBusSubscription.resourceGroupName()) .append("\n\tCreatedAt: ").append(serviceBusSubscription.createdAt()) .append("\n\tUpdatedAt: ").append(serviceBusSubscription.updatedAt()) .append("\n\tAccessedAt: ").append(serviceBusSubscription.accessedAt()) .append("\n\tActiveMessageCount: ").append(serviceBusSubscription.activeMessageCount()) .append("\n\tDeadLetterMessageCount: ").append(serviceBusSubscription.deadLetterMessageCount()) .append("\n\tDefaultMessageTtlDuration: ").append(serviceBusSubscription.defaultMessageTtlDuration()) .append("\n\tIsBatchedOperationsEnabled: ").append(serviceBusSubscription.isBatchedOperationsEnabled()) .append("\n\tDeleteOnIdleDurationInMinutes: ").append(serviceBusSubscription.deleteOnIdleDurationInMinutes()) .append("\n\tScheduledMessageCount: ").append(serviceBusSubscription.scheduledMessageCount()) 
.append("\n\tStatus: ").append(serviceBusSubscription.status()) .append("\n\tTransferMessageCount: ").append(serviceBusSubscription.transferMessageCount()) .append("\n\tIsDeadLetteringEnabledForExpiredMessages: ").append(serviceBusSubscription.isDeadLetteringEnabledForExpiredMessages()) .append("\n\tIsSessionEnabled: ").append(serviceBusSubscription.isSessionEnabled()) .append("\n\tLockDurationInSeconds: ").append(serviceBusSubscription.lockDurationInSeconds()) .append("\n\tMaxDeliveryCountBeforeDeadLetteringMessage: ").append(serviceBusSubscription.maxDeliveryCountBeforeDeadLetteringMessage()) .append("\n\tIsDeadLetteringEnabledForFilterEvaluationFailedMessages: ").append(serviceBusSubscription.isDeadLetteringEnabledForFilterEvaluationFailedMessages()) .append("\n\tTransferMessageCount: ").append(serviceBusSubscription.transferMessageCount()) .append("\n\tTransferDeadLetterMessageCount: ").append(serviceBusSubscription.transferDeadLetterMessageCount()); System.out.println(builder.toString()); } /** * Print topic Authorization Rule info. 
* * @param topicAuthorizationRule a topic Authorization Rule */ public static void print(TopicAuthorizationRule topicAuthorizationRule) { StringBuilder builder = new StringBuilder() .append("Service bus topic authorization rule: ").append(topicAuthorizationRule.id()) .append("\n\tName: ").append(topicAuthorizationRule.name()) .append("\n\tResourceGroupName: ").append(topicAuthorizationRule.resourceGroupName()) .append("\n\tNamespace Name: ").append(topicAuthorizationRule.namespaceName()) .append("\n\tTopic Name: ").append(topicAuthorizationRule.topicName()); List<com.azure.resourcemanager.servicebus.models.AccessRights> rights = topicAuthorizationRule.rights(); builder.append("\n\tNumber of access rights in queue: ").append(rights.size()); for (com.azure.resourcemanager.servicebus.models.AccessRights right : rights) { builder.append("\n\t\tAccessRight: ") .append("\n\t\t\tName :").append(right.name()); } System.out.println(builder.toString()); } /** * Print CosmosDB info. * * @param cosmosDBAccount a CosmosDB */ public static void print(CosmosDBAccount cosmosDBAccount) { StringBuilder builder = new StringBuilder() .append("CosmosDB: ").append(cosmosDBAccount.id()) .append("\n\tName: ").append(cosmosDBAccount.name()) .append("\n\tResourceGroupName: ").append(cosmosDBAccount.resourceGroupName()) .append("\n\tKind: ").append(cosmosDBAccount.kind().toString()) .append("\n\tDefault consistency level: ").append(cosmosDBAccount.consistencyPolicy().defaultConsistencyLevel()) .append("\n\tIP range filter: ").append(cosmosDBAccount.ipRangeFilter()); DatabaseAccountListKeysResult keys = cosmosDBAccount.listKeys(); DatabaseAccountListReadOnlyKeysResult readOnlyKeys = cosmosDBAccount.listReadOnlyKeys(); builder .append("\n\tPrimary Master Key: ").append(keys.primaryMasterKey()) .append("\n\tSecondary Master Key: ").append(keys.secondaryMasterKey()) .append("\n\tPrimary Read-Only Key: ").append(readOnlyKeys.primaryReadonlyMasterKey()) .append("\n\tSecondary Read-Only Key: 
").append(readOnlyKeys.secondaryReadonlyMasterKey()); for (Location writeReplica : cosmosDBAccount.writableReplications()) { builder.append("\n\t\tWrite replication: ") .append("\n\t\t\tName :").append(writeReplica.locationName()); } builder.append("\n\tNumber of read replications: ").append(cosmosDBAccount.readableReplications().size()); for (Location readReplica : cosmosDBAccount.readableReplications()) { builder.append("\n\t\tRead replication: ") .append("\n\t\t\tName :").append(readReplica.locationName()); } } /** * Print Active Directory User info. * * @param user active directory user */ public static void print(ActiveDirectoryUser user) { StringBuilder builder = new StringBuilder() .append("Active Directory User: ").append(user.id()) .append("\n\tName: ").append(user.name()) .append("\n\tMail: ").append(user.mail()) .append("\n\tMail Nickname: ").append(user.mailNickname()) .append("\n\tUser Principal Name: ").append(user.userPrincipalName()); System.out.println(builder.toString()); } /** * Print Active Directory User info. 
* * @param role role definition */ public static void print(RoleDefinition role) { StringBuilder builder = new StringBuilder() .append("Role Definition: ").append(role.id()) .append("\n\tName: ").append(role.name()) .append("\n\tRole Name: ").append(role.roleName()) .append("\n\tType: ").append(role.type()) .append("\n\tDescription: ").append(role.description()) .append("\n\tType: ").append(role.type()); Set<Permission> permissions = role.permissions(); builder.append("\n\tPermissions: ").append(permissions.size()); for (Permission permission : permissions) { builder.append("\n\t\tPermission Actions: " + permission.actions().size()); for (String action : permission.actions()) { builder.append("\n\t\t\tName :").append(action); } builder.append("\n\t\tPermission Not Actions: " + permission.notActions().size()); for (String notAction : permission.notActions()) { builder.append("\n\t\t\tName :").append(notAction); } builder.append("\n\t\tPermission Data Actions: " + permission.dataActions().size()); for (String dataActions : permission.dataActions()) { builder.append("\n\t\t\tName :").append(dataActions); } builder.append("\n\t\tPermission Not Data Actions: " + permission.notDataActions().size()); for (String notDataActions : permission.notDataActions()) { builder.append("\n\t\t\tName :").append(notDataActions); } } Set<String> assignableScopes = role.assignableScopes(); builder.append("\n\tAssignable scopes: ").append(assignableScopes.size()); for (String scope : assignableScopes) { builder.append("\n\t\tAssignable Scope: ") .append("\n\t\t\tName :").append(scope); } System.out.println(builder.toString()); } /** * Print Role Assignment info. 
* * @param roleAssignment role assignment */ public static void print(RoleAssignment roleAssignment) { StringBuilder builder = new StringBuilder() .append("Role Assignment: ") .append("\n\tScope: ").append(roleAssignment.scope()) .append("\n\tPrincipal Id: ").append(roleAssignment.principalId()) .append("\n\tRole Definition Id: ").append(roleAssignment.roleDefinitionId()); System.out.println(builder.toString()); } /** * Print Active Directory Group info. * * @param group active directory group */ public static void print(ActiveDirectoryGroup group) { StringBuilder builder = new StringBuilder() .append("Active Directory Group: ").append(group.id()) .append("\n\tName: ").append(group.name()) .append("\n\tMail: ").append(group.mail()) .append("\n\tSecurity Enabled: ").append(group.securityEnabled()) .append("\n\tGroup members:"); for (ActiveDirectoryObject object : group.listMembers()) { builder.append("\n\t\tType: ").append(object.getClass().getSimpleName()) .append("\tName: ").append(object.name()); } System.out.println(builder.toString()); } /** * Print Active Directory Application info. * * @param application active directory application */ public static void print(ActiveDirectoryApplication application) { StringBuilder builder = new StringBuilder() .append("Active Directory Application: ").append(application.id()) .append("\n\tName: ").append(application.name()) .append("\n\tSign on URL: ").append(application.signOnUrl()) .append("\n\tReply URLs:"); for (String replyUrl : application.replyUrls()) { builder.append("\n\t\t").append(replyUrl); } System.out.println(builder.toString()); } /** * Print Service Principal info. 
* * @param servicePrincipal service principal */ public static void print(ServicePrincipal servicePrincipal) { StringBuilder builder = new StringBuilder() .append("Service Principal: ").append(servicePrincipal.id()) .append("\n\tName: ").append(servicePrincipal.name()) .append("\n\tApplication Id: ").append(servicePrincipal.applicationId()); List<String> names = servicePrincipal.servicePrincipalNames(); builder.append("\n\tNames: ").append(names.size()); for (String name : names) { builder.append("\n\t\tName: ").append(name); } System.out.println(builder.toString()); } /** * Print Network Watcher info. * * @param nw network watcher */ public static void print(NetworkWatcher nw) { StringBuilder builder = new StringBuilder() .append("Network Watcher: ").append(nw.id()) .append("\n\tName: ").append(nw.name()) .append("\n\tResource group name: ").append(nw.resourceGroupName()) .append("\n\tRegion name: ").append(nw.regionName()); System.out.println(builder.toString()); } /** * Print packet capture info. 
* * @param resource packet capture */ public static void print(PacketCapture resource) { StringBuilder sb = new StringBuilder().append("Packet Capture: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tTarget id: ").append(resource.targetId()) .append("\n\tTime limit in seconds: ").append(resource.timeLimitInSeconds()) .append("\n\tBytes to capture per packet: ").append(resource.bytesToCapturePerPacket()) .append("\n\tProvisioning state: ").append(resource.provisioningState()) .append("\n\tStorage location:") .append("\n\tStorage account id: ").append(resource.storageLocation().storageId()) .append("\n\tStorage account path: ").append(resource.storageLocation().storagePath()) .append("\n\tFile path: ").append(resource.storageLocation().filePath()) .append("\n\t Packet capture filters: ").append(resource.filters().size()); for (PacketCaptureFilter filter : resource.filters()) { sb.append("\n\t\tProtocol: ").append(filter.protocol()); sb.append("\n\t\tLocal IP address: ").append(filter.localIpAddress()); sb.append("\n\t\tRemote IP address: ").append(filter.remoteIpAddress()); sb.append("\n\t\tLocal port: ").append(filter.localPort()); sb.append("\n\t\tRemote port: ").append(filter.remotePort()); } System.out.println(sb.toString()); } /** * Print verification IP flow info. * * @param resource IP flow verification info */ public static void print(VerificationIPFlow resource) { System.out.println(new StringBuilder("IP flow verification: ") .append("\n\tAccess: ").append(resource.access()) .append("\n\tRule name: ").append(resource.ruleName()) .toString()); } /** * Print topology info. 
* * @param resource topology */ public static void print(Topology resource) { StringBuilder sb = new StringBuilder().append("Topology: ").append(resource.id()) .append("\n\tTopology parameters: ") .append("\n\t\tResource group: ").append(resource.topologyParameters().targetResourceGroupName()) .append("\n\t\tVirtual network: ").append(resource.topologyParameters().targetVirtualNetwork() == null ? "" : resource.topologyParameters().targetVirtualNetwork().id()) .append("\n\t\tSubnet id: ").append(resource.topologyParameters().targetSubnet() == null ? "" : resource.topologyParameters().targetSubnet().id()) .append("\n\tCreated time: ").append(resource.createdTime()) .append("\n\tLast modified time: ").append(resource.lastModifiedTime()); for (TopologyResource tr : resource.resources().values()) { sb.append("\n\tTopology resource: ").append(tr.id()) .append("\n\t\tName: ").append(tr.name()) .append("\n\t\tLocation: ").append(tr.location()) .append("\n\t\tAssociations:"); for (TopologyAssociation association : tr.associations()) { sb.append("\n\t\t\tName:").append(association.name()) .append("\n\t\t\tResource id:").append(association.resourceId()) .append("\n\t\t\tAssociation type:").append(association.associationType()); } } System.out.println(sb.toString()); } /** * Print flow log settings info. * * @param resource flow log settings */ public static void print(FlowLogSettings resource) { System.out.println(new StringBuilder().append("Flow log settings: ") .append("Target resource id: ").append(resource.targetResourceId()) .append("\n\tFlow log enabled: ").append(resource.enabled()) .append("\n\tStorage account id: ").append(resource.storageId()) .append("\n\tRetention policy enabled: ").append(resource.isRetentionEnabled()) .append("\n\tRetention policy days: ").append(resource.retentionDays()) .toString()); } /** * Print availability set info. 
* * @param resource an availability set */ public static void print(SecurityGroupView resource) { StringBuilder sb = new StringBuilder().append("Security group view: ") .append("\n\tVirtual machine id: ").append(resource.vmId()); for (SecurityGroupNetworkInterface sgni : resource.networkInterfaces().values()) { sb.append("\n\tSecurity group network interface:").append(sgni.id()) .append("\n\t\tSecurity group network interface:") .append("\n\t\tEffective security rules:"); for (EffectiveNetworkSecurityRule rule : sgni.securityRuleAssociations().effectiveSecurityRules()) { sb.append("\n\t\t\tName: ").append(rule.name()) .append("\n\t\t\tDirection: ").append(rule.direction()) .append("\n\t\t\tAccess: ").append(rule.access()) .append("\n\t\t\tPriority: ").append(rule.priority()) .append("\n\t\t\tSource address prefix: ").append(rule.sourceAddressPrefix()) .append("\n\t\t\tSource port range: ").append(rule.sourcePortRange()) .append("\n\t\t\tDestination address prefix: ").append(rule.destinationAddressPrefix()) .append("\n\t\t\tDestination port range: ").append(rule.destinationPortRange()) .append("\n\t\t\tProtocol: ").append(rule.protocol()); } sb.append("\n\t\tSubnet:").append(sgni.securityRuleAssociations().subnetAssociation().id()); printSecurityRule(sb, sgni.securityRuleAssociations().subnetAssociation().securityRules()); if (sgni.securityRuleAssociations().networkInterfaceAssociation() != null) { sb.append("\n\t\tNetwork interface:").append(sgni.securityRuleAssociations().networkInterfaceAssociation().id()); printSecurityRule(sb, sgni.securityRuleAssociations().networkInterfaceAssociation().securityRules()); } sb.append("\n\t\tDefault security rules:"); printSecurityRule(sb, sgni.securityRuleAssociations().defaultSecurityRules()); } System.out.println(sb.toString()); } private static void printSecurityRule(StringBuilder sb, List<SecurityRuleInner> rules) { for (SecurityRuleInner rule : rules) { sb.append("\n\t\t\tName: ").append(rule.name()) 
.append("\n\t\t\tDirection: ").append(rule.direction()) .append("\n\t\t\tAccess: ").append(rule.access()) .append("\n\t\t\tPriority: ").append(rule.priority()) .append("\n\t\t\tSource address prefix: ").append(rule.sourceAddressPrefix()) .append("\n\t\t\tSource port range: ").append(rule.sourcePortRange()) .append("\n\t\t\tDestination address prefix: ").append(rule.destinationAddressPrefix()) .append("\n\t\t\tDestination port range: ").append(rule.destinationPortRange()) .append("\n\t\t\tProtocol: ").append(rule.protocol()) .append("\n\t\t\tDescription: ").append(rule.description()) .append("\n\t\t\tProvisioning state: ").append(rule.provisioningState()); } } /** * Print next hop info. * * @param resource an availability set */ public static void print(NextHop resource) { System.out.println(new StringBuilder("Next hop: ") .append("Next hop type: ").append(resource.nextHopType()) .append("\n\tNext hop ip address: ").append(resource.nextHopIpAddress()) .append("\n\tRoute table id: ").append(resource.routeTableId()) .toString()); } /** * Print container group info. 
* * @param resource a container group */ public static void print(ContainerGroup resource) { StringBuilder info = new StringBuilder().append("Container Group: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tOS type: ").append(resource.osType()); if (resource.ipAddress() != null) { info.append("\n\tPublic IP address: ").append(resource.ipAddress()); } if (resource.externalTcpPorts() != null) { info.append("\n\tExternal TCP ports:"); for (int port : resource.externalTcpPorts()) { info.append(" ").append(port); } } if (resource.externalUdpPorts() != null) { info.append("\n\tExternal UDP ports:"); for (int port : resource.externalUdpPorts()) { info.append(" ").append(port); } } if (resource.imageRegistryServers() != null) { info.append("\n\tPrivate Docker image registries:"); for (String server : resource.imageRegistryServers()) { info.append(" ").append(server); } } if (resource.volumes() != null) { info.append("\n\tVolume mapping: "); for (Map.Entry<String, Volume> entry : resource.volumes().entrySet()) { info.append("\n\t\tName: ").append(entry.getKey()).append(" -> ") .append(entry.getValue().azureFile() != null ? 
entry.getValue().azureFile().shareName() : "empty direcory volume"); } } if (resource.containers() != null) { info.append("\n\tContainer instances: "); for (Map.Entry<String, Container> entry : resource.containers().entrySet()) { Container container = entry.getValue(); info.append("\n\t\tName: ").append(entry.getKey()).append(" -> ").append(container.image()); info.append("\n\t\t\tResources: "); info.append(container.resources().requests().cpu()).append("CPUs "); info.append(container.resources().requests().memoryInGB()).append("GB"); info.append("\n\t\t\tPorts:"); for (ContainerPort port : container.ports()) { info.append(" ").append(port.port()); } if (container.volumeMounts() != null) { info.append("\n\t\t\tVolume mounts:"); for (VolumeMount volumeMount : container.volumeMounts()) { info.append(" ").append(volumeMount.name()).append("->").append(volumeMount.mountPath()); } } if (container.command() != null) { info.append("\n\t\t\tStart commands:"); for (String command : container.command()) { info.append("\n\t\t\t\t").append(command); } } if (container.environmentVariables() != null) { info.append("\n\t\t\tENV vars:"); for (EnvironmentVariable envVar : container.environmentVariables()) { info.append("\n\t\t\t\t").append(envVar.name()).append("=").append(envVar.value()); } } } } System.out.println(info.toString()); } /** * Print event hub namespace. 
* * @param resource a virtual machine */ public static void print(EventHubNamespace resource) { StringBuilder info = new StringBuilder(); info.append("Eventhub Namespace: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tAzureInsightMetricId: ").append(resource.azureInsightMetricId()) .append("\n\tIsAutoScale enabled: ").append(resource.isAutoScaleEnabled()) .append("\n\tServiceBus endpoint: ").append(resource.serviceBusEndpoint()) .append("\n\tThroughPut upper limit: ").append(resource.throughputUnitsUpperLimit()) .append("\n\tCurrent ThroughPut: ").append(resource.currentThroughputUnits()) .append("\n\tCreated time: ").append(resource.createdAt()) .append("\n\tUpdated time: ").append(resource.updatedAt()); System.out.println(info.toString()); } /** * Print event hub. * * @param resource event hub */ public static void print(EventHub resource) { StringBuilder info = new StringBuilder(); info.append("Eventhub: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tNamespace resource group: ").append(resource.namespaceResourceGroupName()) .append("\n\tNamespace: ").append(resource.namespaceName()) .append("\n\tIs data capture enabled: ").append(resource.isDataCaptureEnabled()) .append("\n\tPartition ids: ").append(resource.partitionIds()); if (resource.isDataCaptureEnabled()) { info.append("\n\t\t\tData capture window size in MB: ").append(resource.dataCaptureWindowSizeInMB()); info.append("\n\t\t\tData capture window size in seconds: ").append(resource.dataCaptureWindowSizeInSeconds()); if (resource.captureDestination() != null) { info.append("\n\t\t\tData capture storage account: ").append(resource.captureDestination().storageAccountResourceId()); info.append("\n\t\t\tData capture storage container: ").append(resource.captureDestination().blobContainer()); } } System.out.println(info.toString()); } /** * 
Print event hub namespace recovery pairing. * * @param resource event hub namespace disaster recovery pairing */ public static void print(EventHubDisasterRecoveryPairing resource) { StringBuilder info = new StringBuilder(); info.append("DisasterRecoveryPairing: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tPrimary namespace resource group name: ").append(resource.primaryNamespaceResourceGroupName()) .append("\n\tPrimary namespace name: ").append(resource.primaryNamespaceName()) .append("\n\tSecondary namespace: ").append(resource.secondaryNamespaceId()) .append("\n\tNamespace role: ").append(resource.namespaceRole()); System.out.println(info.toString()); } /** * Print event hub namespace recovery pairing auth rules. * * @param resource event hub namespace disaster recovery pairing auth rule */ public static void print(DisasterRecoveryPairingAuthorizationRule resource) { StringBuilder info = new StringBuilder(); info.append("DisasterRecoveryPairing auth rule: ").append(resource.name()); List<String> rightsStr = new ArrayList<>(); for (AccessRights rights : resource.rights()) { rightsStr.add(rights.toString()); } info.append("\n\tRights: ").append(rightsStr); System.out.println(info.toString()); } /** * Print event hub namespace recovery pairing auth rule key. 
* * @param resource event hub namespace disaster recovery pairing auth rule key */ public static void print(DisasterRecoveryPairingAuthorizationKey resource) { StringBuilder info = new StringBuilder(); info.append("DisasterRecoveryPairing auth key: ") .append("\n\t Alias primary connection string: ").append(resource.aliasPrimaryConnectionString()) .append("\n\t Alias secondary connection string: ").append(resource.aliasSecondaryConnectionString()) .append("\n\t Primary key: ").append(resource.primaryKey()) .append("\n\t Secondary key: ").append(resource.secondaryKey()) .append("\n\t Primary connection string: ").append(resource.primaryConnectionString()) .append("\n\t Secondary connection string: ").append(resource.secondaryConnectionString()); System.out.println(info.toString()); } /** * Print event hub consumer group. * * @param resource event hub consumer group */ public static void print(EventHubConsumerGroup resource) { StringBuilder info = new StringBuilder(); info.append("Event hub consumer group: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tNamespace resource group: ").append(resource.namespaceResourceGroupName()) .append("\n\tNamespace: ").append(resource.namespaceName()) .append("\n\tEvent hub name: ").append(resource.eventHubName()) .append("\n\tUser metadata: ").append(resource.userMetadata()); System.out.println(info.toString()); } /** * Print Diagnostic Setting. 
* * @param resource Diagnostic Setting instance */ public static void print(DiagnosticSetting resource) { StringBuilder info = new StringBuilder("Diagnostic Setting: ") .append("\n\tId: ").append(resource.id()) .append("\n\tAssociated resource Id: ").append(resource.resourceId()) .append("\n\tName: ").append(resource.name()) .append("\n\tStorage Account Id: ").append(resource.storageAccountId()) .append("\n\tEventHub Namespace Autorization Rule Id: ").append(resource.eventHubAuthorizationRuleId()) .append("\n\tEventHub name: ").append(resource.eventHubName()) .append("\n\tLog Analytics workspace Id: ").append(resource.workspaceId()); if (resource.logs() != null && !resource.logs().isEmpty()) { info.append("\n\tLog Settings: "); for (LogSettings ls : resource.logs()) { info.append("\n\t\tCategory: ").append(ls.category()); info.append("\n\t\tRetention policy: "); if (ls.retentionPolicy() != null) { info.append(ls.retentionPolicy().days() + " days"); } else { info.append("NONE"); } } } if (resource.metrics() != null && !resource.metrics().isEmpty()) { info.append("\n\tMetric Settings: "); for (MetricSettings ls : resource.metrics()) { info.append("\n\t\tCategory: ").append(ls.category()); info.append("\n\t\tTimegrain: ").append(ls.timeGrain()); info.append("\n\t\tRetention policy: "); if (ls.retentionPolicy() != null) { info.append(ls.retentionPolicy().days() + " days"); } else { info.append("NONE"); } } } System.out.println(info.toString()); } /** * Print Action group settings. 
* * @param actionGroup action group instance */ public static void print(ActionGroup actionGroup) { StringBuilder info = new StringBuilder("Action Group: ") .append("\n\tId: ").append(actionGroup.id()) .append("\n\tName: ").append(actionGroup.name()) .append("\n\tShort Name: ").append(actionGroup.shortName()); if (actionGroup.emailReceivers() != null && !actionGroup.emailReceivers().isEmpty()) { info.append("\n\tEmail receivers: "); for (EmailReceiver er : actionGroup.emailReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tEMail: ").append(er.emailAddress()); info.append("\n\t\tStatus: ").append(er.status()); info.append("\n\t\t==="); } } if (actionGroup.smsReceivers() != null && !actionGroup.smsReceivers().isEmpty()) { info.append("\n\tSMS text message receivers: "); for (SmsReceiver er : actionGroup.smsReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tPhone: ").append(er.countryCode() + er.phoneNumber()); info.append("\n\t\tStatus: ").append(er.status()); info.append("\n\t\t==="); } } if (actionGroup.webhookReceivers() != null && !actionGroup.webhookReceivers().isEmpty()) { info.append("\n\tWebhook receivers: "); for (WebhookReceiver er : actionGroup.webhookReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tURI: ").append(er.serviceUri()); info.append("\n\t\t==="); } } if (actionGroup.pushNotificationReceivers() != null && !actionGroup.pushNotificationReceivers().isEmpty()) { info.append("\n\tApp Push Notification receivers: "); for (AzureAppPushReceiver er : actionGroup.pushNotificationReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tEmail: ").append(er.emailAddress()); info.append("\n\t\t==="); } } if (actionGroup.voiceReceivers() != null && !actionGroup.voiceReceivers().isEmpty()) { info.append("\n\tVoice Message receivers: "); for (VoiceReceiver er : actionGroup.voiceReceivers()) { info.append("\n\t\tName: ").append(er.name()); 
info.append("\n\t\tPhone: ").append(er.countryCode() + er.phoneNumber()); info.append("\n\t\t==="); } } if (actionGroup.automationRunbookReceivers() != null && !actionGroup.automationRunbookReceivers().isEmpty()) { info.append("\n\tAutomation Runbook receivers: "); for (AutomationRunbookReceiver er : actionGroup.automationRunbookReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tRunbook Name: ").append(er.runbookName()); info.append("\n\t\tAccount Id: ").append(er.automationAccountId()); info.append("\n\t\tIs Global: ").append(er.isGlobalRunbook()); info.append("\n\t\tService URI: ").append(er.serviceUri()); info.append("\n\t\tWebhook resource Id: ").append(er.webhookResourceId()); info.append("\n\t\t==="); } } if (actionGroup.azureFunctionReceivers() != null && !actionGroup.azureFunctionReceivers().isEmpty()) { info.append("\n\tAzure Functions receivers: "); for (AzureFunctionReceiver er : actionGroup.azureFunctionReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tFunction Name: ").append(er.functionName()); info.append("\n\t\tFunction App Resource Id: ").append(er.functionAppResourceId()); info.append("\n\t\tFunction Trigger URI: ").append(er.httpTriggerUrl()); info.append("\n\t\t==="); } } if (actionGroup.logicAppReceivers() != null && !actionGroup.logicAppReceivers().isEmpty()) { info.append("\n\tLogic App receivers: "); for (LogicAppReceiver er : actionGroup.logicAppReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tResource Id: ").append(er.resourceId()); info.append("\n\t\tCallback URL: ").append(er.callbackUrl()); info.append("\n\t\t==="); } } if (actionGroup.itsmReceivers() != null && !actionGroup.itsmReceivers().isEmpty()) { info.append("\n\tITSM receivers: "); for (ItsmReceiver er : actionGroup.itsmReceivers()) { info.append("\n\t\tName: ").append(er.name()); info.append("\n\t\tWorkspace Id: ").append(er.workspaceId()); info.append("\n\t\tConnection Id: 
").append(er.connectionId()); info.append("\n\t\tRegion: ").append(er.region()); info.append("\n\t\tTicket Configuration: ").append(er.ticketConfiguration()); info.append("\n\t\t==="); } } System.out.println(info.toString()); } /** * Print activity log alert settings. * * @param activityLogAlert activity log instance */ public static void print(ActivityLogAlert activityLogAlert) { StringBuilder info = new StringBuilder("Activity Log Alert: ") .append("\n\tId: ").append(activityLogAlert.id()) .append("\n\tName: ").append(activityLogAlert.name()) .append("\n\tDescription: ").append(activityLogAlert.description()) .append("\n\tIs Enabled: ").append(activityLogAlert.enabled()); if (activityLogAlert.scopes() != null && !activityLogAlert.scopes().isEmpty()) { info.append("\n\tScopes: "); for (String er : activityLogAlert.scopes()) { info.append("\n\t\tId: ").append(er); } } if (activityLogAlert.actionGroupIds() != null && !activityLogAlert.actionGroupIds().isEmpty()) { info.append("\n\tAction Groups: "); for (String er : activityLogAlert.actionGroupIds()) { info.append("\n\t\tAction Group Id: ").append(er); } } if (activityLogAlert.equalsConditions() != null && !activityLogAlert.equalsConditions().isEmpty()) { info.append("\n\tAlert conditions (when all of is true): "); for (Map.Entry<String, String> er : activityLogAlert.equalsConditions().entrySet()) { info.append("\n\t\t'").append(er.getKey()).append("' equals '").append(er.getValue()).append("'"); } } System.out.println(info.toString()); } /** * Print metric alert settings. 
* * @param metricAlert metric alert instance */ public static void print(MetricAlert metricAlert) { StringBuilder info = new StringBuilder("Metric Alert: ") .append("\n\tId: ").append(metricAlert.id()) .append("\n\tName: ").append(metricAlert.name()) .append("\n\tDescription: ").append(metricAlert.description()) .append("\n\tIs Enabled: ").append(metricAlert.enabled()) .append("\n\tIs Auto Mitigated: ").append(metricAlert.autoMitigate()) .append("\n\tSeverity: ").append(metricAlert.severity()) .append("\n\tWindow Size: ").append(metricAlert.windowSize()) .append("\n\tEvaluation Frequency: ").append(metricAlert.evaluationFrequency()); if (metricAlert.scopes() != null && !metricAlert.scopes().isEmpty()) { info.append("\n\tScopes: "); for (String er : metricAlert.scopes()) { info.append("\n\t\tId: ").append(er); } } if (metricAlert.actionGroupIds() != null && !metricAlert.actionGroupIds().isEmpty()) { info.append("\n\tAction Groups: "); for (String er : metricAlert.actionGroupIds()) { info.append("\n\t\tAction Group Id: ").append(er); } } if (metricAlert.alertCriterias() != null && !metricAlert.alertCriterias().isEmpty()) { info.append("\n\tAlert conditions (when all of is true): "); for (Map.Entry<String, MetricAlertCondition> er : metricAlert.alertCriterias().entrySet()) { MetricAlertCondition alertCondition = er.getValue(); info.append("\n\t\tCondition name: ").append(er.getKey()) .append("\n\t\tSignal name: ").append(alertCondition.metricName()) .append("\n\t\tMetric Namespace: ").append(alertCondition.metricNamespace()) .append("\n\t\tOperator: ").append(alertCondition.condition()) .append("\n\t\tThreshold: ").append(alertCondition.threshold()) .append("\n\t\tTime Aggregation: ").append(alertCondition.timeAggregation()); if (alertCondition.dimensions() != null && !alertCondition.dimensions().isEmpty()) { for (MetricDimension dimon : alertCondition.dimensions()) { info.append("\n\t\tDimension Filter: ").append("Name [").append(dimon.name()).append("] operator 
[Include] values["); for (String vals : dimon.values()) { info.append(vals).append(", "); } info.append("]"); } } } } System.out.println(info.toString()); } /** * Print spring service settings. * * @param springService spring service instance */ public static void print(SpringService springService) { StringBuilder info = new StringBuilder("Spring Service: ") .append("\n\tId: ").append(springService.id()) .append("\n\tName: ").append(springService.name()) .append("\n\tResource Group: ").append(springService.resourceGroupName()) .append("\n\tRegion: ").append(springService.region()) .append("\n\tTags: ").append(springService.tags()); ConfigServerProperties serverProperties = springService.getServerProperties(); if (serverProperties != null && serverProperties.provisioningState() != null && serverProperties.provisioningState().equals(ConfigServerState.SUCCEEDED) && serverProperties.configServer() != null) { info.append("\n\tProperties: "); if (serverProperties.configServer().gitProperty() != null) { info.append("\n\t\tGit: ").append(serverProperties.configServer().gitProperty().uri()); } } if (springService.sku() != null) { info.append("\n\tSku: ") .append("\n\t\tName: ").append(springService.sku().name()) .append("\n\t\tTier: ").append(springService.sku().tier()) .append("\n\t\tCapacity: ").append(springService.sku().capacity()); } MonitoringSettingProperties monitoringSettingProperties = springService.getMonitoringSetting(); if (monitoringSettingProperties != null && monitoringSettingProperties.provisioningState() != null && monitoringSettingProperties.provisioningState().equals(MonitoringSettingState.SUCCEEDED)) { info.append("\n\tTrace: ") .append("\n\t\tEnabled: ").append(monitoringSettingProperties.traceEnabled()) .append("\n\t\tApp Insight Instrumentation Key: ").append(monitoringSettingProperties.appInsightsInstrumentationKey()); } System.out.println(info.toString()); } /** * Print spring app settings. 
* * @param springApp spring app instance */ public static void print(SpringApp springApp) { StringBuilder info = new StringBuilder("Spring Service: ") .append("\n\tId: ").append(springApp.id()) .append("\n\tName: ").append(springApp.name()) .append("\n\tPublic Endpoint: ").append(springApp.isPublic()) .append("\n\tUrl: ").append(springApp.url()) .append("\n\tHttps Only: ").append(springApp.isHttpsOnly()) .append("\n\tFully Qualified Domain Name: ").append(springApp.fqdn()) .append("\n\tActive Deployment Name: ").append(springApp.activeDeploymentName()); if (springApp.temporaryDisk() != null) { info.append("\n\tTemporary Disk:") .append("\n\t\tSize In GB: ").append(springApp.temporaryDisk().sizeInGB()) .append("\n\t\tMount Path: ").append(springApp.temporaryDisk().mountPath()); } if (springApp.persistentDisk() != null) { info.append("\n\tPersistent Disk:") .append("\n\t\tSize In GB: ").append(springApp.persistentDisk().sizeInGB()) .append("\n\t\tMount Path: ").append(springApp.persistentDisk().mountPath()); } if (springApp.identity() != null) { info.append("\n\tIdentity:") .append("\n\t\tType: ").append(springApp.identity().type()) .append("\n\t\tPrincipal Id: ").append(springApp.identity().principalId()) .append("\n\t\tTenant Id: ").append(springApp.identity().tenantId()); } System.out.println(info.toString()); } /** * Print private link resource. * * @param privateLinkResource the private link resource */ public static void print(PrivateLinkResource privateLinkResource) { StringBuilder info = new StringBuilder("Private Link Resource: ") .append("\n\tGroup ID: ").append(privateLinkResource.groupId()) .append("\n\tRequired Member Names: ").append(privateLinkResource.requiredMemberNames()) .append("\n\tRequired DNS Zone Names: ").append(privateLinkResource.requiredDnsZoneNames()); System.out.println(info); } /** * Print private endpoint. 
* * @param privateEndpoint the private endpoint */ public static void print(PrivateEndpoint privateEndpoint) { StringBuilder info = new StringBuilder("Private Endpoint: ") .append("\n\tId: ").append(privateEndpoint.id()) .append("\n\tName: ").append(privateEndpoint.name()); if (privateEndpoint.privateLinkServiceConnections() != null && !privateEndpoint.privateLinkServiceConnections().isEmpty()) { for (PrivateEndpoint.PrivateLinkServiceConnection connection : privateEndpoint.privateLinkServiceConnections().values()) { info .append("\n\t\tPrivate Link Service Connection Name: ").append(connection.name()) .append("\n\t\tPrivate Link Resource ID: ").append(connection.privateLinkResourceId()) .append("\n\t\tSub Resource Names: ").append(connection.subResourceNames()) .append("\n\t\tProvision Status: ").append(connection.state().status()); } } if (privateEndpoint.privateLinkServiceConnections() != null && !privateEndpoint.privateLinkServiceConnections().isEmpty()) { info.append("\n\tPrivate Link Service Connections:"); for (PrivateEndpoint.PrivateLinkServiceConnection connection : privateEndpoint.privateLinkServiceConnections().values()) { info .append("\n\t\tName: ").append(connection.name()) .append("\n\t\tPrivate Link Resource ID: ").append(connection.privateLinkResourceId()) .append("\n\t\tSub Resource Names: ").append(connection.subResourceNames()) .append("\n\t\tStatus: ").append(connection.state().status()); } } if (privateEndpoint.customDnsConfigurations() != null && !privateEndpoint.customDnsConfigurations().isEmpty()) { info.append("\n\tCustom DNS Configure:"); for (CustomDnsConfigPropertiesFormat customDns : privateEndpoint.customDnsConfigurations()) { info .append("\n\t\tFQDN: ").append(customDns.fqdn()) .append("\n\t\tIP Address: ").append(customDns.ipAddresses()); } } System.out.println(info); } /** * Sends a GET request to target URL. * <p> * Retry logic tuned for AppService. * The method does not handle 301 redirect. * * @param urlString the target URL. 
* @return Content of the HTTP response. */ public static String sendGetRequest(String urlString) { HttpRequest request = new HttpRequest(HttpMethod.GET, urlString); Mono<Response<String>> response = stringResponse(HTTP_PIPELINE.send(request) .flatMap(response1 -> { int code = response1.getStatusCode(); if (code == 200 || code == 400 || code == 404) { return Mono.just(response1); } else { return Mono.error(new HttpResponseException(response1)); } }) .retryWhen(Retry .fixedDelay(5, Duration.ofSeconds(30)) .filter(t -> { boolean retry = false; if (t instanceof TimeoutException) { retry = true; } else if (t instanceof HttpResponseException && ((HttpResponseException) t).getResponse().getStatusCode() == 503) { retry = true; } if (retry) { LOGGER.info("retry GET request to {}", urlString); } return retry; }))); Response<String> ret = response.block(); return ret == null ? null : ret.getValue(); } /** * Sends a POST request to target URL. * <p> * Retry logic tuned for AppService. * * @param urlString the target URL. * @param body the request body. * @return Content of the HTTP response. * */ public static String sendPostRequest(String urlString, String body) { try { HttpRequest request = new HttpRequest(HttpMethod.POST, urlString).setBody(body); Mono<Response<String>> response = stringResponse(HTTP_PIPELINE.send(request) .flatMap(response1 -> { int code = response1.getStatusCode(); if (code == 200 || code == 400 || code == 404) { return Mono.just(response1); } else { return Mono.error(new HttpResponseException(response1)); } }) .retryWhen(Retry .fixedDelay(5, Duration.ofSeconds(30)) .filter(t -> { boolean retry = false; if (t instanceof TimeoutException) { retry = true; } if (retry) { LOGGER.info("retry POST request to {}", urlString); } return retry; }))); Response<String> ret = response.block(); return ret == null ? 
null : ret.getValue(); } catch (Exception e) { LOGGER.logThrowableAsError(e); return null; } } private static Mono<Response<String>> stringResponse(Mono<HttpResponse> responseMono) { return responseMono.flatMap(response -> response.getBodyAsString() .map(str -> new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), str))); } private static final HttpPipeline HTTP_PIPELINE = new HttpPipelineBuilder() .policies( new HttpLoggingPolicy(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC)), new RetryPolicy("Retry-After", ChronoUnit.SECONDS)) .build(); /** * Get the size of the iterable. * * @param iterable iterable to count size * @param <T> generic type parameter of the iterable * @return size of the iterable */ public static <T> int getSize(Iterable<T> iterable) { int res = 0; Iterator<T> iterator = iterable.iterator(); while (iterator.hasNext()) { iterator.next(); res++; } return res; } }
class Utils {
    private static final ClientLogger LOGGER = new ClientLogger(Utils.class);
    // Lazily generated, cached SSH public key shared by all callers.
    private static String sshPublicKey;

    private Utils() {
    }

    /**
     * @return a generated password
     */
    public static String password() {
        String password = new ResourceManagerUtils.InternalRuntimeContext().randomResourceName("Pa5$", 12);
        System.out.printf("Password: %s%n", password);
        return password;
    }

    /**
     * @return an SSH public key
     */
    public static String sshPublicKey() {
        if (sshPublicKey == null) {
            try {
                // NOTE(review): 1024-bit RSA is acceptable for samples only; use >= 2048 bits in production.
                KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA");
                keyGen.initialize(1024);
                KeyPair pair = keyGen.generateKeyPair();
                RSAPublicKey rsaPublicKey = (RSAPublicKey) pair.getPublic();
                ByteArrayOutputStream keyBlob = new ByteArrayOutputStream();
                DataOutputStream dataOut = new DataOutputStream(keyBlob);
                // OpenSSH wire format: length-prefixed "ssh-rsa" tag, public exponent, modulus.
                byte[] sshRsaTag = "ssh-rsa".getBytes(StandardCharsets.US_ASCII);
                dataOut.writeInt(sshRsaTag.length);
                dataOut.write(sshRsaTag);
                byte[] exponent = rsaPublicKey.getPublicExponent().toByteArray();
                dataOut.writeInt(exponent.length);
                dataOut.write(exponent);
                byte[] modulus = rsaPublicKey.getModulus().toByteArray();
                dataOut.writeInt(modulus.length);
                dataOut.write(modulus);
                String publicKeyEncoded = new String(Base64.getEncoder().encode(keyBlob.toByteArray()),
                    StandardCharsets.US_ASCII);
                sshPublicKey = "ssh-rsa " + publicKeyEncoded;
            } catch (NoSuchAlgorithmException | IOException e) {
                throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e));
            }
        }
        return sshPublicKey;
    }

    /**
     * Creates a randomized resource name.
     * Please provide your own implementation, or avoid using the method, if code is to be used in production.
     *
     * @param azure the AzureResourceManager instance.
     * @param prefix the prefix to the name.
     * @param maxLen the max length of the name.
     * @return the randomized resource name.
     */
    public static String randomResourceName(AzureResourceManager azure, String prefix, int maxLen) {
        return azure.resourceGroups().manager().internalContext().randomResourceName(prefix, maxLen);
    }

    /**
     * Generates the specified number of random resource names with the same prefix.
     * Please provide your own implementation, or avoid using the method, if code is to be used in production.
     *
     * @param azure the AzureResourceManager instance.
     * @param prefix the prefix to be used if possible
     * @param maxLen the maximum length for the random generated name
     * @param count the number of names to generate
     * @return the randomized resource names.
     */
    public static String[] randomResourceNames(AzureResourceManager azure, String prefix, int maxLen, int count) {
        String[] names = new String[count];
        for (int index = 0; index < count; index++) {
            names[index] = randomResourceName(azure, prefix, maxLen);
        }
        return names;
    }

    /**
     * Creates a random UUID.
     * Please provide your own implementation, or avoid using the method, if code is to be used in production.
     *
     * @param azure the AzureResourceManager instance.
     * @return the random UUID.
     */
    public static String randomUuid(AzureResourceManager azure) {
        return azure.resourceGroups().manager().internalContext().randomUuid();
    }

    /**
     * Creates a randomized resource name.
     * Please provide your own implementation, or avoid using the method, if code is to be used in production.
     *
     * @param authenticated the AzureResourceManager.Authenticated instance.
     * @param prefix the prefix to the name.
     * @param maxLen the max length of the name.
     * @return the randomized resource name.
     */
    public static String randomResourceName(AzureResourceManager.Authenticated authenticated, String prefix,
        int maxLen) {
        return authenticated.roleAssignments().manager().internalContext().randomResourceName(prefix, maxLen);
    }

    /**
     * Print resource group info.
* * @param resource a resource group */ public static void print(ResourceGroup resource) { StringBuilder info = new StringBuilder(); info.append("Resource Group: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()); System.out.println(info.toString()); } /** * Print User Assigned MSI info. * * @param resource a User Assigned MSI */ public static void print(Identity resource) { StringBuilder info = new StringBuilder(); info.append("Resource Group: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tService Principal Id: ").append(resource.principalId()) .append("\n\tClient Id: ").append(resource.clientId()) .append("\n\tTenant Id: ").append(resource.tenantId()); System.out.println(info.toString()); } /** * Print virtual machine info. * * @param resource a virtual machine */ public static void print(VirtualMachine resource) { StringBuilder storageProfile = new StringBuilder().append("\n\tStorageProfile: "); if (resource.storageProfile().imageReference() != null) { storageProfile.append("\n\t\tImageReference:"); storageProfile.append("\n\t\t\tPublisher: ").append(resource.storageProfile().imageReference().publisher()); storageProfile.append("\n\t\t\tOffer: ").append(resource.storageProfile().imageReference().offer()); storageProfile.append("\n\t\t\tSKU: ").append(resource.storageProfile().imageReference().sku()); storageProfile.append("\n\t\t\tVersion: ").append(resource.storageProfile().imageReference().version()); } if (resource.storageProfile().osDisk() != null) { storageProfile.append("\n\t\tOSDisk:"); storageProfile.append("\n\t\t\tOSType: ").append(resource.storageProfile().osDisk().osType()); storageProfile.append("\n\t\t\tName: ").append(resource.storageProfile().osDisk().name()); storageProfile.append("\n\t\t\tCaching: 
").append(resource.storageProfile().osDisk().caching()); storageProfile.append("\n\t\t\tCreateOption: ").append(resource.storageProfile().osDisk().createOption()); storageProfile.append("\n\t\t\tDiskSizeGB: ").append(resource.storageProfile().osDisk().diskSizeGB()); if (resource.storageProfile().osDisk().managedDisk() != null) { if (resource.storageProfile().osDisk().managedDisk().diskEncryptionSet() != null) { storageProfile.append("\n\t\t\tDiskEncryptionSet Id: ") .append(resource.storageProfile().osDisk().managedDisk().diskEncryptionSet().id()); } } if (resource.storageProfile().osDisk().image() != null) { storageProfile.append("\n\t\t\tImage Uri: ").append(resource.storageProfile().osDisk().image().uri()); } if (resource.storageProfile().osDisk().vhd() != null) { storageProfile.append("\n\t\t\tVhd Uri: ").append(resource.storageProfile().osDisk().vhd().uri()); } if (resource.storageProfile().osDisk().encryptionSettings() != null) { storageProfile.append("\n\t\t\tEncryptionSettings: "); storageProfile.append("\n\t\t\t\tEnabled: ").append(resource.storageProfile().osDisk().encryptionSettings().enabled()); storageProfile.append("\n\t\t\t\tDiskEncryptionKey Uri: ").append(resource .storageProfile() .osDisk() .encryptionSettings() .diskEncryptionKey().secretUrl()); storageProfile.append("\n\t\t\t\tKeyEncryptionKey Uri: ").append(resource .storageProfile() .osDisk() .encryptionSettings() .keyEncryptionKey().keyUrl()); } } if (resource.storageProfile().dataDisks() != null) { int i = 0; for (DataDisk disk : resource.storageProfile().dataDisks()) { storageProfile.append("\n\t\tDataDisk: storageProfile.append("\n\t\t\tName: ").append(disk.name()); storageProfile.append("\n\t\t\tCaching: ").append(disk.caching()); storageProfile.append("\n\t\t\tCreateOption: ").append(disk.createOption()); storageProfile.append("\n\t\t\tDiskSizeGB: ").append(disk.diskSizeGB()); storageProfile.append("\n\t\t\tLun: ").append(disk.lun()); if (resource.isManagedDiskEnabled()) { if 
(disk.managedDisk() != null) { storageProfile.append("\n\t\t\tManaged Disk Id: ").append(disk.managedDisk().id()); if (disk.managedDisk().diskEncryptionSet() != null) { storageProfile.append("\n\t\t\tDiskEncryptionSet Id: ").append(disk.managedDisk().diskEncryptionSet().id()); } } } else { if (disk.vhd().uri() != null) { storageProfile.append("\n\t\t\tVhd Uri: ").append(disk.vhd().uri()); } } if (disk.image() != null) { storageProfile.append("\n\t\t\tImage Uri: ").append(disk.image().uri()); } } } StringBuilder osProfile = new StringBuilder().append("\n\tOSProfile: "); if (resource.osProfile() != null) { osProfile.append("\n\t\tComputerName:").append(resource.osProfile().computerName()); if (resource.osProfile().windowsConfiguration() != null) { osProfile.append("\n\t\t\tWindowsConfiguration: "); osProfile.append("\n\t\t\t\tProvisionVMAgent: ") .append(resource.osProfile().windowsConfiguration().provisionVMAgent()); osProfile.append("\n\t\t\t\tEnableAutomaticUpdates: ") .append(resource.osProfile().windowsConfiguration().enableAutomaticUpdates()); osProfile.append("\n\t\t\t\tTimeZone: ") .append(resource.osProfile().windowsConfiguration().timeZone()); } if (resource.osProfile().linuxConfiguration() != null) { osProfile.append("\n\t\t\tLinuxConfiguration: "); osProfile.append("\n\t\t\t\tDisablePasswordAuthentication: ") .append(resource.osProfile().linuxConfiguration().disablePasswordAuthentication()); } } else { osProfile.append("null"); } StringBuilder networkProfile = new StringBuilder().append("\n\tNetworkProfile: "); for (String networkInterfaceId : resource.networkInterfaceIds()) { networkProfile.append("\n\t\tId:").append(networkInterfaceId); } StringBuilder extensions = new StringBuilder().append("\n\tExtensions: "); for (Map.Entry<String, VirtualMachineExtension> extensionEntry : resource.listExtensions().entrySet()) { VirtualMachineExtension extension = extensionEntry.getValue(); extensions.append("\n\t\tExtension: ").append(extension.id()) 
.append("\n\t\t\tName: ").append(extension.name()) .append("\n\t\t\tTags: ").append(extension.tags()) .append("\n\t\t\tProvisioningState: ").append(extension.provisioningState()) .append("\n\t\t\tAuto upgrade minor version enabled: ").append(extension.autoUpgradeMinorVersionEnabled()) .append("\n\t\t\tPublisher: ").append(extension.publisherName()) .append("\n\t\t\tType: ").append(extension.typeName()) .append("\n\t\t\tVersion: ").append(extension.versionName()) .append("\n\t\t\tPublic Settings: ").append(extension.publicSettingsAsJsonString()); } StringBuilder msi = new StringBuilder().append("\n\tMSI: "); msi.append("\n\t\t\tMSI enabled:").append(resource.isManagedServiceIdentityEnabled()); msi.append("\n\t\t\tSystem Assigned MSI Active Directory Service Principal Id:").append(resource.systemAssignedManagedServiceIdentityPrincipalId()); msi.append("\n\t\t\tSystem Assigned MSI Active Directory Tenant Id:").append(resource.systemAssignedManagedServiceIdentityTenantId()); StringBuilder zones = new StringBuilder().append("\n\tZones: "); zones.append(resource.availabilityZones()); StringBuilder securityProfile = new StringBuilder().append("\n\tSecurityProfile: "); securityProfile.append("\n\t\t\tSecurity type: ").append(resource.securityType()); securityProfile.append("\n\t\t\tSecure Boot enabled: ").append(resource.isSecureBootEnabled()); securityProfile.append("\n\t\t\tvTPM enabled: ").append(resource.isVTpmEnabled()); System.out.println(new StringBuilder().append("Virtual Machine: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tHardwareProfile: ") .append("\n\t\tSize: ").append(resource.size()) .append(storageProfile) .append(osProfile) .append(networkProfile) .append(extensions) .append(msi) .append(zones) .append(securityProfile) .toString()); } /** * Print availability 
set info. * * @param resource an availability set */ public static void print(AvailabilitySet resource) { System.out.println(new StringBuilder().append("Availability Set: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tFault domain count: ").append(resource.faultDomainCount()) .append("\n\tUpdate domain count: ").append(resource.updateDomainCount()) .toString()); } /** * Print network info. * * @param resource a network * @throws ManagementException Cloud errors */ public static void print(Network resource) { StringBuilder info = new StringBuilder(); info.append("Network: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tAddress spaces: ").append(resource.addressSpaces()) .append("\n\tDNS server IPs: ").append(resource.dnsServerIPs()); for (Subnet subnet : resource.subnets().values()) { info.append("\n\tSubnet: ").append(subnet.name()) .append("\n\t\tAddress prefix: ").append(subnet.addressPrefix()); NetworkSecurityGroup subnetNsg = subnet.getNetworkSecurityGroup(); if (subnetNsg != null) { info.append("\n\t\tNetwork security group ID: ").append(subnetNsg.id()); } RouteTable routeTable = subnet.getRouteTable(); if (routeTable != null) { info.append("\n\tRoute table ID: ").append(routeTable.id()); } Map<ServiceEndpointType, List<Region>> services = subnet.servicesWithAccess(); if (services.size() > 0) { info.append("\n\tServices with access"); for (Map.Entry<ServiceEndpointType, List<Region>> service : services.entrySet()) { info.append("\n\t\tService: ") .append(service.getKey()) .append(" Regions: " + service.getValue() + ""); } } } for (NetworkPeering peering : 
resource.peerings().list()) { info.append("\n\tPeering: ").append(peering.name()) .append("\n\t\tRemote network ID: ").append(peering.remoteNetworkId()) .append("\n\t\tPeering state: ").append(peering.state()) .append("\n\t\tIs traffic forwarded from remote network allowed? ").append(peering.isTrafficForwardingFromRemoteNetworkAllowed()) .append("\n\t\tGateway use: ").append(peering.gatewayUse()); } System.out.println(info.toString()); } /** * Print network interface. * * @param resource a network interface */ public static void print(NetworkInterface resource) { StringBuilder info = new StringBuilder(); info.append("NetworkInterface: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tInternal DNS name label: ").append(resource.internalDnsNameLabel()) .append("\n\tInternal FQDN: ").append(resource.internalFqdn()) .append("\n\tInternal domain name suffix: ").append(resource.internalDomainNameSuffix()) .append("\n\tNetwork security group: ").append(resource.networkSecurityGroupId()) .append("\n\tApplied DNS servers: ").append(resource.appliedDnsServers().toString()) .append("\n\tDNS server IPs: "); for (String dnsServerIp : resource.dnsServers()) { info.append("\n\t\t").append(dnsServerIp); } info.append("\n\tIP forwarding enabled? ").append(resource.isIPForwardingEnabled()) .append("\n\tAccelerated networking enabled? 
").append(resource.isAcceleratedNetworkingEnabled()) .append("\n\tMAC Address:").append(resource.macAddress()) .append("\n\tPrivate IP:").append(resource.primaryPrivateIP()) .append("\n\tPrivate allocation method:").append(resource.primaryPrivateIpAllocationMethod()) .append("\n\tPrimary virtual network ID: ").append(resource.primaryIPConfiguration().networkId()) .append("\n\tPrimary subnet name:").append(resource.primaryIPConfiguration().subnetName()); System.out.println(info.toString()); } /** * Print network security group. * * @param resource a network security group */ public static void print(NetworkSecurityGroup resource) { StringBuilder info = new StringBuilder(); info.append("NSG: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()); for (NetworkSecurityRule rule : resource.securityRules().values()) { info.append("\n\tRule: ").append(rule.name()) .append("\n\t\tAccess: ").append(rule.access()) .append("\n\t\tDirection: ").append(rule.direction()) .append("\n\t\tFrom address: ").append(rule.sourceAddressPrefix()) .append("\n\t\tFrom port range: ").append(rule.sourcePortRange()) .append("\n\t\tTo address: ").append(rule.destinationAddressPrefix()) .append("\n\t\tTo port: ").append(rule.destinationPortRange()) .append("\n\t\tProtocol: ").append(rule.protocol()) .append("\n\t\tPriority: ").append(rule.priority()); } System.out.println(info.toString()); } /** * Print public IP address. 
* * @param resource a public IP address */ public static void print(PublicIpAddress resource) { System.out.println(new StringBuilder().append("Public IP Address: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tIP Address: ").append(resource.ipAddress()) .append("\n\tLeaf domain label: ").append(resource.leafDomainLabel()) .append("\n\tFQDN: ").append(resource.fqdn()) .append("\n\tReverse FQDN: ").append(resource.reverseFqdn()) .append("\n\tIdle timeout (minutes): ").append(resource.idleTimeoutInMinutes()) .append("\n\tIP allocation method: ").append(resource.ipAllocationMethod()) .append("\n\tZones: ").append(resource.availabilityZones()) .toString()); } /** * Print a key vault. * * @param vault the key vault resource */ public static void print(Vault vault) { StringBuilder info = new StringBuilder().append("Key Vault: ").append(vault.id()) .append("Name: ").append(vault.name()) .append("\n\tResource group: ").append(vault.resourceGroupName()) .append("\n\tRegion: ").append(vault.region()) .append("\n\tSku: ").append(vault.sku().name()).append(" - ").append(vault.sku().family()) .append("\n\tVault URI: ").append(vault.vaultUri()) .append("\n\tAccess policies: "); for (AccessPolicy accessPolicy : vault.accessPolicies()) { info.append("\n\t\tIdentity:").append(accessPolicy.objectId()); if (accessPolicy.permissions() != null) { if (accessPolicy.permissions().keys() != null) { info.append("\n\t\tKey permissions: ").append(accessPolicy.permissions().keys().stream().map(KeyPermissions::toString).collect(Collectors.joining(", "))); } if (accessPolicy.permissions().secrets() != null) { info.append("\n\t\tSecret permissions: ").append(accessPolicy.permissions().secrets().stream().map(SecretPermissions::toString).collect(Collectors.joining(", "))); } if 
(accessPolicy.permissions().certificates() != null) { info.append("\n\t\tCertificate permissions: ").append(accessPolicy.permissions().certificates().stream().map(CertificatePermissions::toString).collect(Collectors.joining(", "))); } } } System.out.println(info.toString()); } /** * Print storage account. * * @param storageAccount a storage account */ public static void print(StorageAccount storageAccount) { System.out.println(storageAccount.name() + " created @ " + storageAccount.creationTime()); StringBuilder info = new StringBuilder().append("Storage Account: ").append(storageAccount.id()) .append("Name: ").append(storageAccount.name()) .append("\n\tResource group: ").append(storageAccount.resourceGroupName()) .append("\n\tRegion: ").append(storageAccount.region()) .append("\n\tSKU: ").append(storageAccount.skuType().name().toString()) .append("\n\tAccessTier: ").append(storageAccount.accessTier()) .append("\n\tKind: ").append(storageAccount.kind()); info.append("\n\tNetwork Rule Configuration: ") .append("\n\t\tAllow reading logs from any network: ").append(storageAccount.canReadLogEntriesFromAnyNetwork()) .append("\n\t\tAllow reading metrics from any network: ").append(storageAccount.canReadMetricsFromAnyNetwork()) .append("\n\t\tAllow access from all azure services: ").append(storageAccount.canAccessFromAzureServices()); if (storageAccount.networkSubnetsWithAccess().size() > 0) { info.append("\n\t\tNetwork subnets with access: "); for (String subnetId : storageAccount.networkSubnetsWithAccess()) { info.append("\n\t\t\t").append(subnetId); } } if (storageAccount.ipAddressesWithAccess().size() > 0) { info.append("\n\t\tIP addresses with access: "); for (String ipAddress : storageAccount.ipAddressesWithAccess()) { info.append("\n\t\t\t").append(ipAddress); } } if (storageAccount.ipAddressRangesWithAccess().size() > 0) { info.append("\n\t\tIP address-ranges with access: "); for (String ipAddressRange : storageAccount.ipAddressRangesWithAccess()) { 
info.append("\n\t\t\t").append(ipAddressRange); } } info.append("\n\t\tTraffic allowed from only HTTPS: ").append(storageAccount.innerModel().enableHttpsTrafficOnly()); info.append("\n\tEncryption status: "); info.append("\n\t\tInfrastructure Encryption: ").append(storageAccount.infrastructureEncryptionEnabled() ? "Enabled" : "Disabled"); for (Map.Entry<StorageService, StorageAccountEncryptionStatus> eStatus : storageAccount.encryptionStatuses().entrySet()) { info.append("\n\t\t").append(eStatus.getValue().storageService()).append(": ").append(eStatus.getValue().isEnabled() ? "Enabled" : "Disabled"); } System.out.println(info.toString()); } /** * Print storage account keys. * * @param storageAccountKeys a list of storage account keys */ public static void print(List<StorageAccountKey> storageAccountKeys) { for (int i = 0; i < storageAccountKeys.size(); i++) { StorageAccountKey storageAccountKey = storageAccountKeys.get(i); System.out.println("Key (" + i + ") " + storageAccountKey.keyName() + "=" + storageAccountKey.value()); } } /** * Print Redis Cache. * * @param redisCache a Redis cache. 
*/ public static void print(RedisCache redisCache) { StringBuilder redisInfo = new StringBuilder() .append("Redis Cache Name: ").append(redisCache.name()) .append("\n\tResource group: ").append(redisCache.resourceGroupName()) .append("\n\tRegion: ").append(redisCache.region()) .append("\n\tSKU Name: ").append(redisCache.sku().name()) .append("\n\tSKU Family: ").append(redisCache.sku().family()) .append("\n\tHostname: ").append(redisCache.hostname()) .append("\n\tSSL port: ").append(redisCache.sslPort()) .append("\n\tNon-SSL port (6379) enabled: ").append(redisCache.nonSslPort()); if (redisCache.redisConfiguration() != null && !redisCache.redisConfiguration().isEmpty()) { redisInfo.append("\n\tRedis Configuration:"); for (Map.Entry<String, String> redisConfiguration : redisCache.redisConfiguration().entrySet()) { redisInfo.append("\n\t '").append(redisConfiguration.getKey()) .append("' : '").append(redisConfiguration.getValue()).append("'"); } } if (redisCache.isPremium()) { RedisCachePremium premium = redisCache.asPremium(); List<ScheduleEntry> scheduleEntries = premium.listPatchSchedules(); if (scheduleEntries != null && !scheduleEntries.isEmpty()) { redisInfo.append("\n\tRedis Patch Schedule:"); for (ScheduleEntry schedule : scheduleEntries) { redisInfo.append("\n\t\tDay: '").append(schedule.dayOfWeek()) .append("', start at: '").append(schedule.startHourUtc()) .append("', maintenance window: '").append(schedule.maintenanceWindow()) .append("'"); } } } System.out.println(redisInfo.toString()); } /** * Print Redis Cache access keys. * * @param redisAccessKeys a keys for Redis Cache */ public static void print(RedisAccessKeys redisAccessKeys) { StringBuilder redisKeys = new StringBuilder() .append("Redis Access Keys: ") .append("\n\tPrimary Key: '").append(redisAccessKeys.primaryKey()).append("', ") .append("\n\tSecondary Key: '").append(redisAccessKeys.secondaryKey()).append("', "); System.out.println(redisKeys.toString()); } /** * Print management lock. 
*
 * @param lock a management lock
 */
public static void print(ManagementLock lock) {
    StringBuilder info = new StringBuilder();
    info.append("\nLock ID: ").append(lock.id())
            .append("\nLocked resource ID: ").append(lock.lockedResourceId())
            .append("\nLevel: ").append(lock.level());
    System.out.println(info.toString());
}

/**
 * Print load balancer.
 *
 * @param resource a load balancer
 */
public static void print(LoadBalancer resource) {
    StringBuilder info = new StringBuilder();
    // Top-level properties. (Note: "Name: " follows the id with no newline in the original output format.)
    info.append("Load balancer: ").append(resource.id())
            .append("Name: ").append(resource.name())
            .append("\n\tResource group: ").append(resource.resourceGroupName())
            .append("\n\tRegion: ").append(resource.region())
            .append("\n\tTags: ").append(resource.tags())
            .append("\n\tBackends: ").append(resource.backends().keySet().toString());

    // Public IP addresses referenced by the load balancer.
    info.append("\n\tPublic IP address IDs: ")
            .append(resource.publicIpAddressIds().size());
    for (String pipId : resource.publicIpAddressIds()) {
        info.append("\n\t\tPIP id: ").append(pipId);
    }

    // TCP probes, each with the load-balancing rules that reference it.
    info.append("\n\tTCP probes: ")
            .append(resource.tcpProbes().size());
    for (LoadBalancerTcpProbe probe : resource.tcpProbes().values()) {
        info.append("\n\t\tProbe name: ").append(probe.name())
                .append("\n\t\t\tPort: ").append(probe.port())
                .append("\n\t\t\tInterval in seconds: ").append(probe.intervalInSeconds())
                .append("\n\t\t\tRetries before unhealthy: ").append(probe.numberOfProbes());
        info.append("\n\t\t\tReferenced from load balancing rules: ")
                .append(probe.loadBalancingRules().size());
        for (LoadBalancingRule rule : probe.loadBalancingRules().values()) {
            info.append("\n\t\t\t\tName: ").append(rule.name());
        }
    }

    // HTTP probes (same shape as TCP probes plus the request path).
    info.append("\n\tHTTP probes: ")
            .append(resource.httpProbes().size());
    for (LoadBalancerHttpProbe probe : resource.httpProbes().values()) {
        info.append("\n\t\tProbe name: ").append(probe.name())
                .append("\n\t\t\tPort: ").append(probe.port())
                .append("\n\t\t\tInterval in seconds: ").append(probe.intervalInSeconds())
                .append("\n\t\t\tRetries before unhealthy: ").append(probe.numberOfProbes())
                .append("\n\t\t\tHTTP request path: ").append(probe.requestPath());
        info.append("\n\t\t\tReferenced from load balancing rules: ")
                .append(probe.loadBalancingRules().size());
        for (LoadBalancingRule rule : probe.loadBalancingRules().values()) {
            info.append("\n\t\t\t\tName: ").append(rule.name());
        }
    }

    // HTTPS probes.
    info.append("\n\tHTTPS probes: ")
            .append(resource.httpsProbes().size());
    for (LoadBalancerHttpProbe probe : resource.httpsProbes().values()) {
        info.append("\n\t\tProbe name: ").append(probe.name())
                .append("\n\t\t\tPort: ").append(probe.port())
                .append("\n\t\t\tInterval in seconds: ").append(probe.intervalInSeconds())
                .append("\n\t\t\tRetries before unhealthy: ").append(probe.numberOfProbes())
                .append("\n\t\t\tHTTPS request path: ").append(probe.requestPath());
        info.append("\n\t\t\tReferenced from load balancing rules: ")
                .append(probe.loadBalancingRules().size());
        for (LoadBalancingRule rule : probe.loadBalancingRules().values()) {
            info.append("\n\t\t\t\tName: ").append(rule.name());
        }
    }

    // Load-balancing rules; frontend/backend/probe may each be absent.
    info.append("\n\tLoad balancing rules: ")
            .append(resource.loadBalancingRules().size());
    for (LoadBalancingRule rule : resource.loadBalancingRules().values()) {
        info.append("\n\t\tLB rule name: ").append(rule.name())
                .append("\n\t\t\tProtocol: ").append(rule.protocol())
                .append("\n\t\t\tFloating IP enabled? ").append(rule.floatingIPEnabled())
                .append("\n\t\t\tIdle timeout in minutes: ").append(rule.idleTimeoutInMinutes())
                .append("\n\t\t\tLoad distribution method: ").append(rule.loadDistribution().toString());
        LoadBalancerFrontend frontend = rule.frontend();
        info.append("\n\t\t\tFrontend: ");
        if (frontend != null) {
            info.append(frontend.name());
        } else {
            info.append("(None)");
        }
        info.append("\n\t\t\tFrontend port: ").append(rule.frontendPort());
        LoadBalancerBackend backend = rule.backend();
        info.append("\n\t\t\tBackend: ");
        if (backend != null) {
            info.append(backend.name());
        } else {
            info.append("(None)");
        }
        info.append("\n\t\t\tBackend port: ").append(rule.backendPort());
        LoadBalancerProbe probe = rule.probe();
        info.append("\n\t\t\tProbe: ");
        if (probe == null) {
            info.append("(None)");
        } else {
            info.append(probe.name()).append(" [").append(probe.protocol().toString()).append("]");
        }
    }

    // Frontends: public frontends show the PIP id, private ones the vnet/subnet/IP details.
    info.append("\n\tFrontends: ")
            .append(resource.frontends().size());
    for (LoadBalancerFrontend frontend : resource.frontends().values()) {
        info.append("\n\t\tFrontend name: ").append(frontend.name())
                .append("\n\t\t\tInternet facing: ").append(frontend.isPublic());
        if (frontend.isPublic()) {
            info.append("\n\t\t\tPublic IP Address ID: ").append(((LoadBalancerPublicFrontend) frontend).publicIpAddressId());
        } else {
            info.append("\n\t\t\tVirtual network ID: ").append(((LoadBalancerPrivateFrontend) frontend).networkId())
                    .append("\n\t\t\tSubnet name: ").append(((LoadBalancerPrivateFrontend) frontend).subnetName())
                    .append("\n\t\t\tPrivate IP address: ").append(((LoadBalancerPrivateFrontend) frontend).privateIpAddress())
                    .append("\n\t\t\tPrivate IP allocation method: ").append(((LoadBalancerPrivateFrontend) frontend).privateIpAllocationMethod());
        }
        info.append("\n\t\t\tReferenced inbound NAT pools: ")
                .append(frontend.inboundNatPools().size());
        for (LoadBalancerInboundNatPool pool : frontend.inboundNatPools().values()) {
            info.append("\n\t\t\t\tName: ").append(pool.name());
        }
        info.append("\n\t\t\tReferenced inbound NAT rules: ")
                .append(frontend.inboundNatRules().size());
        for (LoadBalancerInboundNatRule rule : frontend.inboundNatRules().values()) {
            info.append("\n\t\t\t\tName: ").append(rule.name());
        }
        info.append("\n\t\t\tReferenced load balancing rules: ")
                .append(frontend.loadBalancingRules().size());
        for (LoadBalancingRule rule : frontend.loadBalancingRules().values()) {
            info.append("\n\t\t\t\tName: ").append(rule.name());
        }
    }

    // Inbound NAT rules.
    info.append("\n\tInbound NAT rules: ")
            .append(resource.inboundNatRules().size());
    for (LoadBalancerInboundNatRule natRule : resource.inboundNatRules().values()) {
        info.append("\n\t\tInbound NAT rule name: ").append(natRule.name())
                .append("\n\t\t\tProtocol: ").append(natRule.protocol().toString())
                .append("\n\t\t\tFrontend: ").append(natRule.frontend().name())
                .append("\n\t\t\tFrontend port: ").append(natRule.frontendPort())
                .append("\n\t\t\tBackend port: ").append(natRule.backendPort())
                .append("\n\t\t\tBackend NIC ID: ").append(natRule.backendNetworkInterfaceId())
                .append("\n\t\t\tBackend NIC IP config name: ").append(natRule.backendNicIpConfigurationName())
                .append("\n\t\t\tFloating IP? ").append(natRule.floatingIPEnabled())
                .append("\n\t\t\tIdle timeout in minutes: ").append(natRule.idleTimeoutInMinutes());
    }

    // Inbound NAT pools.
    info.append("\n\tInbound NAT pools: ")
            .append(resource.inboundNatPools().size());
    for (LoadBalancerInboundNatPool natPool : resource.inboundNatPools().values()) {
        info.append("\n\t\tInbound NAT pool name: ").append(natPool.name())
                .append("\n\t\t\tProtocol: ").append(natPool.protocol().toString())
                .append("\n\t\t\tFrontend: ").append(natPool.frontend().name())
                .append("\n\t\t\tFrontend port range: ")
                .append(natPool.frontendPortRangeStart())
                .append("-")
                .append(natPool.frontendPortRangeEnd())
                .append("\n\t\t\tBackend port: ").append(natPool.backendPort());
    }

    // Backends with their NIC IP-configs, VM ids, and referencing rules.
    info.append("\n\tBackends: ")
            .append(resource.backends().size());
    for (LoadBalancerBackend backend : resource.backends().values()) {
        info.append("\n\t\tBackend name: ").append(backend.name());
        info.append("\n\t\t\tReferenced NICs: ")
                .append(backend.backendNicIPConfigurationNames().entrySet().size());
        for (Map.Entry<String, String> entry : backend.backendNicIPConfigurationNames().entrySet()) {
            info.append("\n\t\t\t\tNIC ID: ").append(entry.getKey())
                    .append(" - IP Config: ").append(entry.getValue());
        }
        Set<String> vmIds = backend.getVirtualMachineIds();
        info.append("\n\t\t\tReferenced virtual machine ids: ")
                .append(vmIds.size());
        for (String vmId : vmIds) {
            info.append("\n\t\t\t\tVM ID: ").append(vmId);
        }
        info.append("\n\t\t\tReferenced load balancing rules: ")
                .append(new ArrayList<String>(backend.loadBalancingRules().keySet()));
    }
    System.out.println(info.toString());
}

/**
 * Print app service domain.
* * @param resource an app service domain */ public static void print(AppServiceDomain resource) { StringBuilder builder = new StringBuilder().append("Domain: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tCreated time: ").append(resource.createdTime()) .append("\n\tExpiration time: ").append(resource.expirationTime()) .append("\n\tContact: "); Contact contact = resource.registrantContact(); if (contact == null) { builder = builder.append("Private"); } else { builder = builder.append("\n\t\tName: ").append(contact.nameFirst() + " " + contact.nameLast()); } builder = builder.append("\n\tName servers: "); for (String nameServer : resource.nameServers()) { builder = builder.append("\n\t\t" + nameServer); } System.out.println(builder.toString()); } /** * Print app service certificate order. * * @param resource an app service certificate order */ public static void print(AppServiceCertificateOrder resource) { StringBuilder builder = new StringBuilder().append("App service certificate order: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tDistinguished name: ").append(resource.distinguishedName()) .append("\n\tProduct type: ").append(resource.productType()) .append("\n\tValid years: ").append(resource.validityInYears()) .append("\n\tStatus: ").append(resource.status()) .append("\n\tIssuance time: ").append(resource.lastCertificateIssuanceTime()) .append("\n\tSigned certificate: ").append(resource.signedCertificate() == null ? null : resource.signedCertificate().thumbprint()); System.out.println(builder.toString()); } /** * Print app service plan. 
* * @param resource an app service plan */ public static void print(AppServicePlan resource) { StringBuilder builder = new StringBuilder().append("App service certificate order: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tPricing tier: ").append(resource.pricingTier()); System.out.println(builder.toString()); } /** * Print a web app. * * @param resource a web app */ public static void print(WebAppBase resource) { StringBuilder builder = new StringBuilder().append("Web app: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tState: ").append(resource.state()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tDefault hostname: ").append(resource.defaultHostname()) .append("\n\tApp service plan: ").append(resource.appServicePlanId()) .append("\n\tHost name bindings: "); for (HostnameBinding binding : resource.getHostnameBindings().values()) { builder = builder.append("\n\t\t" + binding.toString()); } builder = builder.append("\n\tSSL bindings: "); for (HostnameSslState binding : resource.hostnameSslStates().values()) { builder = builder.append("\n\t\t" + binding.name() + ": " + binding.sslState()); if (binding.sslState() != null && binding.sslState() != SslState.DISABLED) { builder = builder.append(" - " + binding.thumbprint()); } } builder = builder.append("\n\tApp settings: "); for (AppSetting setting : resource.getAppSettings().values()) { builder = builder.append("\n\t\t" + setting.key() + ": " + setting.value() + (setting.sticky() ? " - slot setting" : "")); } builder = builder.append("\n\tConnection strings: "); for (ConnectionString conn : resource.getConnectionStrings().values()) { builder = builder.append("\n\t\t" + conn.name() + ": " + conn.value() + " - " + conn.type() + (conn.sticky() ? 
" - slot setting" : "")); } System.out.println(builder.toString()); } /** * Print a web site. * * @param resource a web site */ public static void print(WebSiteBase resource) { StringBuilder builder = new StringBuilder().append("Web app: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tState: ").append(resource.state()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tDefault hostname: ").append(resource.defaultHostname()) .append("\n\tApp service plan: ").append(resource.appServicePlanId()); builder = builder.append("\n\tSSL bindings: "); for (HostnameSslState binding : resource.hostnameSslStates().values()) { builder = builder.append("\n\t\t" + binding.name() + ": " + binding.sslState()); if (binding.sslState() != null && binding.sslState() != SslState.DISABLED) { builder = builder.append(" - " + binding.thumbprint()); } } System.out.println(builder.toString()); } /** * Print a traffic manager profile. 
* * @param profile a traffic manager profile */ public static void print(TrafficManagerProfile profile) { StringBuilder info = new StringBuilder(); info.append("Traffic Manager Profile: ").append(profile.id()) .append("\n\tName: ").append(profile.name()) .append("\n\tResource group: ").append(profile.resourceGroupName()) .append("\n\tRegion: ").append(profile.regionName()) .append("\n\tTags: ").append(profile.tags()) .append("\n\tDNSLabel: ").append(profile.dnsLabel()) .append("\n\tFQDN: ").append(profile.fqdn()) .append("\n\tTTL: ").append(profile.timeToLive()) .append("\n\tEnabled: ").append(profile.isEnabled()) .append("\n\tRoutingMethod: ").append(profile.trafficRoutingMethod()) .append("\n\tMonitor status: ").append(profile.monitorStatus()) .append("\n\tMonitoring port: ").append(profile.monitoringPort()) .append("\n\tMonitoring path: ").append(profile.monitoringPath()); Map<String, TrafficManagerAzureEndpoint> azureEndpoints = profile.azureEndpoints(); if (!azureEndpoints.isEmpty()) { info.append("\n\tAzure endpoints:"); int idx = 1; for (TrafficManagerAzureEndpoint endpoint : azureEndpoints.values()) { info.append("\n\t\tAzure endpoint: .append("\n\t\t\tId: ").append(endpoint.id()) .append("\n\t\t\tType: ").append(endpoint.endpointType()) .append("\n\t\t\tTarget resourceId: ").append(endpoint.targetAzureResourceId()) .append("\n\t\t\tTarget resourceType: ").append(endpoint.targetResourceType()) .append("\n\t\t\tMonitor status: ").append(endpoint.monitorStatus()) .append("\n\t\t\tEnabled: ").append(endpoint.isEnabled()) .append("\n\t\t\tRouting priority: ").append(endpoint.routingPriority()) .append("\n\t\t\tRouting weight: ").append(endpoint.routingWeight()); } } Map<String, TrafficManagerExternalEndpoint> externalEndpoints = profile.externalEndpoints(); if (!externalEndpoints.isEmpty()) { info.append("\n\tExternal endpoints:"); int idx = 1; for (TrafficManagerExternalEndpoint endpoint : externalEndpoints.values()) { info.append("\n\t\tExternal endpoint: 
.append("\n\t\t\tId: ").append(endpoint.id()) .append("\n\t\t\tType: ").append(endpoint.endpointType()) .append("\n\t\t\tFQDN: ").append(endpoint.fqdn()) .append("\n\t\t\tSource Traffic Location: ").append(endpoint.sourceTrafficLocation()) .append("\n\t\t\tMonitor status: ").append(endpoint.monitorStatus()) .append("\n\t\t\tEnabled: ").append(endpoint.isEnabled()) .append("\n\t\t\tRouting priority: ").append(endpoint.routingPriority()) .append("\n\t\t\tRouting weight: ").append(endpoint.routingWeight()); } } Map<String, TrafficManagerNestedProfileEndpoint> nestedProfileEndpoints = profile.nestedProfileEndpoints(); if (!nestedProfileEndpoints.isEmpty()) { info.append("\n\tNested profile endpoints:"); int idx = 1; for (TrafficManagerNestedProfileEndpoint endpoint : nestedProfileEndpoints.values()) { info.append("\n\t\tNested profile endpoint: .append("\n\t\t\tId: ").append(endpoint.id()) .append("\n\t\t\tType: ").append(endpoint.endpointType()) .append("\n\t\t\tNested profileId: ").append(endpoint.nestedProfileId()) .append("\n\t\t\tMinimum child threshold: ").append(endpoint.minimumChildEndpointCount()) .append("\n\t\t\tSource Traffic Location: ").append(endpoint.sourceTrafficLocation()) .append("\n\t\t\tMonitor status: ").append(endpoint.monitorStatus()) .append("\n\t\t\tEnabled: ").append(endpoint.isEnabled()) .append("\n\t\t\tRouting priority: ").append(endpoint.routingPriority()) .append("\n\t\t\tRouting weight: ").append(endpoint.routingWeight()); } } System.out.println(info.toString()); } /** * Print a dns zone. 
*
 * @param dnsZone a dns zone
 */
public static void print(DnsZone dnsZone) {
    StringBuilder info = new StringBuilder();
    info.append("DNS Zone: ").append(dnsZone.id())
            .append("\n\tName (Top level domain): ").append(dnsZone.name())
            .append("\n\tResource group: ").append(dnsZone.resourceGroupName())
            .append("\n\tRegion: ").append(dnsZone.regionName())
            .append("\n\tTags: ").append(dnsZone.tags())
            .append("\n\tName servers:");
    for (String nameServer : dnsZone.nameServers()) {
        info.append("\n\t\t").append(nameServer);
    }
    // SOA record: every zone has exactly one.
    SoaRecordSet soaRecordSet = dnsZone.getSoaRecordSet();
    SoaRecord soaRecord = soaRecordSet.record();
    info.append("\n\tSOA Record:")
            .append("\n\t\tHost:").append(soaRecord.host())
            .append("\n\t\tEmail:").append(soaRecord.email())
            .append("\n\t\tExpire time (seconds):").append(soaRecord.expireTime())
            .append("\n\t\tRefresh time (seconds):").append(soaRecord.refreshTime())
            .append("\n\t\tRetry time (seconds):").append(soaRecord.retryTime())
            .append("\n\t\tNegative response cache ttl (seconds):").append(soaRecord.minimumTtl())
            .append("\n\t\tTTL (seconds):").append(soaRecordSet.timeToLive());
    // Each record-set listing below issues a service call via .list().
    PagedIterable<ARecordSet> aRecordSets = dnsZone.aRecordSets().list();
    info.append("\n\tA Record sets:");
    for (ARecordSet aRecordSet : aRecordSets) {
        info.append("\n\t\tId: ").append(aRecordSet.id())
                .append("\n\t\tName: ").append(aRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(aRecordSet.timeToLive())
                .append("\n\t\tIP v4 addresses: ");
        for (String ipAddress : aRecordSet.ipv4Addresses()) {
            info.append("\n\t\t\t").append(ipAddress);
        }
    }
    PagedIterable<AaaaRecordSet> aaaaRecordSets = dnsZone.aaaaRecordSets().list();
    info.append("\n\tAAAA Record sets:");
    for (AaaaRecordSet aaaaRecordSet : aaaaRecordSets) {
        info.append("\n\t\tId: ").append(aaaaRecordSet.id())
                .append("\n\t\tName: ").append(aaaaRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(aaaaRecordSet.timeToLive())
                .append("\n\t\tIP v6 addresses: ");
        for (String ipAddress : aaaaRecordSet.ipv6Addresses()) {
            info.append("\n\t\t\t").append(ipAddress);
        }
    }
    PagedIterable<CnameRecordSet> cnameRecordSets = dnsZone.cNameRecordSets().list();
    info.append("\n\tCNAME Record sets:");
    for (CnameRecordSet cnameRecordSet : cnameRecordSets) {
        info.append("\n\t\tId: ").append(cnameRecordSet.id())
                .append("\n\t\tName: ").append(cnameRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(cnameRecordSet.timeToLive())
                .append("\n\t\tCanonical name: ").append(cnameRecordSet.canonicalName());
    }
    PagedIterable<MxRecordSet> mxRecordSets = dnsZone.mxRecordSets().list();
    info.append("\n\tMX Record sets:");
    for (MxRecordSet mxRecordSet : mxRecordSets) {
        info.append("\n\t\tId: ").append(mxRecordSet.id())
                .append("\n\t\tName: ").append(mxRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(mxRecordSet.timeToLive())
                .append("\n\t\tRecords: ");
        for (MxRecord mxRecord : mxRecordSet.records()) {
            info.append("\n\t\t\tExchange server, Preference: ")
                    .append(mxRecord.exchange())
                    .append(" ")
                    .append(mxRecord.preference());
        }
    }
    PagedIterable<NsRecordSet> nsRecordSets = dnsZone.nsRecordSets().list();
    info.append("\n\tNS Record sets:");
    for (NsRecordSet nsRecordSet : nsRecordSets) {
        info.append("\n\t\tId: ").append(nsRecordSet.id())
                .append("\n\t\tName: ").append(nsRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(nsRecordSet.timeToLive())
                .append("\n\t\tName servers: ");
        for (String nameServer : nsRecordSet.nameServers()) {
            info.append("\n\t\t\t").append(nameServer);
        }
    }
    PagedIterable<PtrRecordSet> ptrRecordSets = dnsZone.ptrRecordSets().list();
    info.append("\n\tPTR Record sets:");
    for (PtrRecordSet ptrRecordSet : ptrRecordSets) {
        info.append("\n\t\tId: ").append(ptrRecordSet.id())
                .append("\n\t\tName: ").append(ptrRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(ptrRecordSet.timeToLive())
                .append("\n\t\tTarget domain names: ");
        for (String domainNames : ptrRecordSet.targetDomainNames()) {
            info.append("\n\t\t\t").append(domainNames);
        }
    }
    PagedIterable<SrvRecordSet> srvRecordSets = dnsZone.srvRecordSets().list();
    info.append("\n\tSRV Record sets:");
    for (SrvRecordSet srvRecordSet : srvRecordSets) {
        info.append("\n\t\tId: ").append(srvRecordSet.id())
                .append("\n\t\tName: ").append(srvRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(srvRecordSet.timeToLive())
                .append("\n\t\tRecords: ");
        for (SrvRecord srvRecord : srvRecordSet.records()) {
            info.append("\n\t\t\tTarget, Port, Priority, Weight: ")
                    .append(srvRecord.target())
                    .append(", ")
                    .append(srvRecord.port())
                    .append(", ")
                    .append(srvRecord.priority())
                    .append(", ")
                    .append(srvRecord.weight());
        }
    }
    PagedIterable<TxtRecordSet> txtRecordSets = dnsZone.txtRecordSets().list();
    info.append("\n\tTXT Record sets:");
    for (TxtRecordSet txtRecordSet : txtRecordSets) {
        info.append("\n\t\tId: ").append(txtRecordSet.id())
                .append("\n\t\tName: ").append(txtRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(txtRecordSet.timeToLive())
                .append("\n\t\tRecords: ");
        // Only the first value of each TXT record is printed.
        for (TxtRecord txtRecord : txtRecordSet.records()) {
            if (txtRecord.value().size() > 0) {
                info.append("\n\t\t\tValue: ").append(txtRecord.value().get(0));
            }
        }
    }
    System.out.println(info.toString());
}

/**
 * Print a private dns zone.
*
 * @param privateDnsZone a private dns zone
 */
public static void print(PrivateDnsZone privateDnsZone) {
    StringBuilder info = new StringBuilder();
    info.append("Private DNS Zone: ").append(privateDnsZone.id())
            .append("\n\tName (Top level domain): ").append(privateDnsZone.name())
            .append("\n\tResource group: ").append(privateDnsZone.resourceGroupName())
            .append("\n\tRegion: ").append(privateDnsZone.regionName())
            .append("\n\tTags: ").append(privateDnsZone.tags())
            .append("\n\tName servers:");
    // Private-DNS model types are fully qualified to avoid clashing with the
    // public-DNS record types imported at the top of the file.
    com.azure.resourcemanager.privatedns.models.SoaRecordSet soaRecordSet = privateDnsZone.getSoaRecordSet();
    com.azure.resourcemanager.privatedns.models.SoaRecord soaRecord = soaRecordSet.record();
    info.append("\n\tSOA Record:")
            .append("\n\t\tHost:").append(soaRecord.host())
            .append("\n\t\tEmail:").append(soaRecord.email())
            .append("\n\t\tExpire time (seconds):").append(soaRecord.expireTime())
            .append("\n\t\tRefresh time (seconds):").append(soaRecord.refreshTime())
            .append("\n\t\tRetry time (seconds):").append(soaRecord.retryTime())
            .append("\n\t\tNegative response cache ttl (seconds):").append(soaRecord.minimumTtl())
            .append("\n\t\tTTL (seconds):").append(soaRecordSet.timeToLive());
    PagedIterable<com.azure.resourcemanager.privatedns.models.ARecordSet> aRecordSets = privateDnsZone
            .aRecordSets().list();
    info.append("\n\tA Record sets:");
    for (com.azure.resourcemanager.privatedns.models.ARecordSet aRecordSet : aRecordSets) {
        info.append("\n\t\tId: ").append(aRecordSet.id())
                .append("\n\t\tName: ").append(aRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(aRecordSet.timeToLive())
                .append("\n\t\tIP v4 addresses: ");
        for (String ipAddress : aRecordSet.ipv4Addresses()) {
            info.append("\n\t\t\t").append(ipAddress);
        }
    }
    PagedIterable<com.azure.resourcemanager.privatedns.models.AaaaRecordSet> aaaaRecordSets = privateDnsZone
            .aaaaRecordSets().list();
    info.append("\n\tAAAA Record sets:");
    for (com.azure.resourcemanager.privatedns.models.AaaaRecordSet aaaaRecordSet : aaaaRecordSets) {
        info.append("\n\t\tId: ").append(aaaaRecordSet.id())
                .append("\n\t\tName: ").append(aaaaRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(aaaaRecordSet.timeToLive())
                .append("\n\t\tIP v6 addresses: ");
        for (String ipAddress : aaaaRecordSet.ipv6Addresses()) {
            info.append("\n\t\t\t").append(ipAddress);
        }
    }
    PagedIterable<com.azure.resourcemanager.privatedns.models.CnameRecordSet> cnameRecordSets =
            privateDnsZone.cnameRecordSets().list();
    info.append("\n\tCNAME Record sets:");
    for (com.azure.resourcemanager.privatedns.models.CnameRecordSet cnameRecordSet : cnameRecordSets) {
        info.append("\n\t\tId: ").append(cnameRecordSet.id())
                .append("\n\t\tName: ").append(cnameRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(cnameRecordSet.timeToLive())
                .append("\n\t\tCanonical name: ").append(cnameRecordSet.canonicalName());
    }
    PagedIterable<com.azure.resourcemanager.privatedns.models.MxRecordSet> mxRecordSets =
            privateDnsZone.mxRecordSets().list();
    info.append("\n\tMX Record sets:");
    for (com.azure.resourcemanager.privatedns.models.MxRecordSet mxRecordSet : mxRecordSets) {
        info.append("\n\t\tId: ").append(mxRecordSet.id())
                .append("\n\t\tName: ").append(mxRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(mxRecordSet.timeToLive())
                .append("\n\t\tRecords: ");
        for (com.azure.resourcemanager.privatedns.models.MxRecord mxRecord : mxRecordSet.records()) {
            info.append("\n\t\t\tExchange server, Preference: ")
                    .append(mxRecord.exchange())
                    .append(" ")
                    .append(mxRecord.preference());
        }
    }
    PagedIterable<com.azure.resourcemanager.privatedns.models.PtrRecordSet> ptrRecordSets = privateDnsZone
            .ptrRecordSets().list();
    info.append("\n\tPTR Record sets:");
    for (com.azure.resourcemanager.privatedns.models.PtrRecordSet ptrRecordSet : ptrRecordSets) {
        info.append("\n\t\tId: ").append(ptrRecordSet.id())
                .append("\n\t\tName: ").append(ptrRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(ptrRecordSet.timeToLive())
                .append("\n\t\tTarget domain names: ");
        for (String domainNames : ptrRecordSet.targetDomainNames()) {
            info.append("\n\t\t\t").append(domainNames);
        }
    }
    PagedIterable<com.azure.resourcemanager.privatedns.models.SrvRecordSet> srvRecordSets = privateDnsZone
            .srvRecordSets().list();
    info.append("\n\tSRV Record sets:");
    for (com.azure.resourcemanager.privatedns.models.SrvRecordSet srvRecordSet : srvRecordSets) {
        info.append("\n\t\tId: ").append(srvRecordSet.id())
                .append("\n\t\tName: ").append(srvRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(srvRecordSet.timeToLive())
                .append("\n\t\tRecords: ");
        for (com.azure.resourcemanager.privatedns.models.SrvRecord srvRecord : srvRecordSet.records()) {
            info.append("\n\t\t\tTarget, Port, Priority, Weight: ")
                    .append(srvRecord.target())
                    .append(", ")
                    .append(srvRecord.port())
                    .append(", ")
                    .append(srvRecord.priority())
                    .append(", ")
                    .append(srvRecord.weight());
        }
    }
    PagedIterable<com.azure.resourcemanager.privatedns.models.TxtRecordSet> txtRecordSets = privateDnsZone
            .txtRecordSets().list();
    info.append("\n\tTXT Record sets:");
    for (com.azure.resourcemanager.privatedns.models.TxtRecordSet txtRecordSet : txtRecordSets) {
        info.append("\n\t\tId: ").append(txtRecordSet.id())
                .append("\n\t\tName: ").append(txtRecordSet.name())
                .append("\n\t\tTTL (seconds): ").append(txtRecordSet.timeToLive())
                .append("\n\t\tRecords: ");
        // Only the first value of each TXT record is printed.
        for (com.azure.resourcemanager.privatedns.models.TxtRecord txtRecord : txtRecordSet.records()) {
            if (txtRecord.value().size() > 0) {
                info.append("\n\t\t\tValue: ").append(txtRecord.value().get(0));
            }
        }
    }
    // Virtual network links attached to the private zone.
    PagedIterable<VirtualNetworkLink> virtualNetworkLinks = privateDnsZone.virtualNetworkLinks().list();
    info.append("\n\tVirtual Network Links:");
    for (VirtualNetworkLink virtualNetworkLink : virtualNetworkLinks) {
        info.append("\n\tId: ").append(virtualNetworkLink.id())
                .append("\n\tName: ").append(virtualNetworkLink.name())
                .append("\n\tReference of Virtual Network: ").append(virtualNetworkLink.referencedVirtualNetworkId())
                .append("\n\tRegistration enabled: ").append(virtualNetworkLink.isAutoRegistrationEnabled());
    }
    System.out.println(info.toString());
}

/**
 * Print an Azure Container Registry.
 *
 * @param azureRegistry an Azure Container Registry
 */
public static void print(Registry azureRegistry) {
    StringBuilder info = new StringBuilder();
    // NOTE: getCredentials() is a service call; prints registry passwords — sample/demo code only.
    RegistryCredentials acrCredentials = azureRegistry.getCredentials();
    info.append("Azure Container Registry: ").append(azureRegistry.id())
            .append("\n\tName: ").append(azureRegistry.name())
            .append("\n\tServer Url: ").append(azureRegistry.loginServerUrl())
            .append("\n\tUser: ").append(acrCredentials.username())
            .append("\n\tFirst Password: ").append(acrCredentials.accessKeys().get(AccessKeyType.PRIMARY))
            .append("\n\tSecond Password: ").append(acrCredentials.accessKeys().get(AccessKeyType.SECONDARY));
    System.out.println(info.toString());
}

/**
 * Print an Azure Container Service (AKS).
 *
 * @param kubernetesCluster a managed container service
 */
public static void print(KubernetesCluster kubernetesCluster) {
    StringBuilder info = new StringBuilder();
    // Only the first agent pool is reported.
    info.append("Azure Container Service: ").append(kubernetesCluster.id())
            .append("\n\tName: ").append(kubernetesCluster.name())
            .append("\n\tFQDN: ").append(kubernetesCluster.fqdn())
            .append("\n\tDNS prefix label: ").append(kubernetesCluster.dnsPrefix())
            .append("\n\t\tWith Agent pool name: ").append(new ArrayList<>(kubernetesCluster.agentPools().keySet()).get(0))
            .append("\n\t\tAgent pool count: ").append(new ArrayList<>(kubernetesCluster.agentPools().values()).get(0).count())
            .append("\n\t\tAgent pool VM size: ").append(new ArrayList<>(kubernetesCluster.agentPools().values()).get(0).vmSize().toString())
            .append("\n\tLinux user name: ").append(kubernetesCluster.linuxRootUsername())
            .append("\n\tSSH key: ").append(kubernetesCluster.sshKey())
            .append("\n\tService principal client ID: ").append(kubernetesCluster.servicePrincipalClientId());
    System.out.println(info.toString());
}

/**
 * Print an Azure Search Service.
*
 * @param searchService an Azure Search Service
 */
public static void print(SearchService searchService) {
    StringBuilder info = new StringBuilder();
    // Both key lookups are service calls; admin/query keys are printed — sample/demo code only.
    AdminKeys adminKeys = searchService.getAdminKeys();
    PagedIterable<QueryKey> queryKeys = searchService.listQueryKeys();
    info.append("Azure Search: ").append(searchService.id())
            .append("\n\tResource group: ").append(searchService.resourceGroupName())
            .append("\n\tRegion: ").append(searchService.region())
            .append("\n\tTags: ").append(searchService.tags())
            .append("\n\tSku: ").append(searchService.sku().name())
            .append("\n\tStatus: ").append(searchService.status())
            .append("\n\tProvisioning State: ").append(searchService.provisioningState())
            .append("\n\tHosting Mode: ").append(searchService.hostingMode())
            .append("\n\tReplicas: ").append(searchService.replicaCount())
            .append("\n\tPartitions: ").append(searchService.partitionCount())
            .append("\n\tPrimary Admin Key: ").append(adminKeys.primaryKey())
            .append("\n\tSecondary Admin Key: ").append(adminKeys.secondaryKey())
            .append("\n\tQuery keys:");
    for (QueryKey queryKey : queryKeys) {
        info.append("\n\t\tKey name: ").append(queryKey.name());
        info.append("\n\t\t Value: ").append(queryKey.key());
    }
    System.out.println(info.toString());
}

/**
 * Retrieve the secondary service principal client ID.
 *
 * @param envSecondaryServicePrincipal an Azure Container Registry
 * @return a service principal client ID
 * @throws IOException exception
 */
class Utils {
    private static final ClientLogger LOGGER = new ClientLogger(Utils.class);

    // Lazily generated, cached SSH public key (OpenSSH "ssh-rsa ..." form).
    private static String sshPublicKey;

    private Utils() {
    }

    /** @return a generated password */
    public static String password() {
        String generated = new ResourceManagerUtils.InternalRuntimeContext().randomResourceName("Pa5$", 12);
        System.out.printf("Password: %s%n", generated);
        return generated;
    }

    /**
     * @return an SSH public key
     */
    public static String sshPublicKey() {
        if (sshPublicKey == null) {
            try {
                // NOTE(review): 1024-bit RSA is weak by modern standards; sample/demo only — confirm
                // whether 2048+ is acceptable for callers before changing.
                KeyPairGenerator generator = KeyPairGenerator.getInstance("RSA");
                generator.initialize(1024);
                KeyPair pair = generator.generateKeyPair();
                RSAPublicKey rsaPublicKey = (RSAPublicKey) pair.getPublic();
                ByteArrayOutputStream buffer = new ByteArrayOutputStream();
                DataOutputStream out = new DataOutputStream(buffer);
                // OpenSSH wire format: length-prefixed algorithm name, exponent, then modulus.
                out.writeInt("ssh-rsa".getBytes(StandardCharsets.US_ASCII).length);
                out.write("ssh-rsa".getBytes(StandardCharsets.US_ASCII));
                out.writeInt(rsaPublicKey.getPublicExponent().toByteArray().length);
                out.write(rsaPublicKey.getPublicExponent().toByteArray());
                out.writeInt(rsaPublicKey.getModulus().toByteArray().length);
                out.write(rsaPublicKey.getModulus().toByteArray());
                String encoded = new String(Base64.getEncoder().encode(buffer.toByteArray()), StandardCharsets.US_ASCII);
                sshPublicKey = "ssh-rsa " + encoded;
            } catch (NoSuchAlgorithmException | IOException e) {
                throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e));
            }
        }
        return sshPublicKey;
    }

    /**
     * Creates a randomized resource name.
     * Please provide your own implementation, or avoid using the method, if code is to be used in production.
     *
     * @param azure the AzureResourceManager instance.
     * @param prefix the prefix to the name.
     * @param maxLen the max length of the name.
     * @return the randomized resource name.
*/ public static String randomResourceName(AzureResourceManager azure, String prefix, int maxLen) { return azure.resourceGroups().manager().internalContext().randomResourceName(prefix, maxLen); } /** * Generates the specified number of random resource names with the same prefix. * Please provider your own implementation, or avoid using the method, if code is to be used in production. * * @param azure the AzureResourceManager instance. * @param prefix the prefix to be used if possible * @param maxLen the maximum length for the random generated name * @param count the number of names to generate * @return the randomized resource names. */ public static String[] randomResourceNames(AzureResourceManager azure, String prefix, int maxLen, int count) { String[] names = new String[count]; for (int i = 0; i < count; i++) { names[i] = randomResourceName(azure, prefix, maxLen); } return names; } /** * Creates a random UUID. * Please provider your own implementation, or avoid using the method, if code is to be used in production. * * @param azure the AzureResourceManager instance. * @return the random UUID. */ public static String randomUuid(AzureResourceManager azure) { return azure.resourceGroups().manager().internalContext().randomUuid(); } /** * Creates a randomized resource name. * Please provider your own implementation, or avoid using the method, if code is to be used in production. * * @param authenticated the AzureResourceManager.Authenticated instance. * @param prefix the prefix to the name. * @param maxLen the max length of the name. * @return the randomized resource name. */ public static String randomResourceName(AzureResourceManager.Authenticated authenticated, String prefix, int maxLen) { return authenticated.roleAssignments().manager().internalContext().randomResourceName(prefix, maxLen); } /** * Print resource group info. 
* * @param resource a resource group */ public static void print(ResourceGroup resource) { StringBuilder info = new StringBuilder(); info.append("Resource Group: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()); System.out.println(info.toString()); } /** * Print User Assigned MSI info. * * @param resource a User Assigned MSI */ public static void print(Identity resource) { StringBuilder info = new StringBuilder(); info.append("Resource Group: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tService Principal Id: ").append(resource.principalId()) .append("\n\tClient Id: ").append(resource.clientId()) .append("\n\tTenant Id: ").append(resource.tenantId()); System.out.println(info.toString()); } /** * Print virtual machine info. * * @param resource a virtual machine */ public static void print(VirtualMachine resource) { StringBuilder storageProfile = new StringBuilder().append("\n\tStorageProfile: "); if (resource.storageProfile().imageReference() != null) { storageProfile.append("\n\t\tImageReference:"); storageProfile.append("\n\t\t\tPublisher: ").append(resource.storageProfile().imageReference().publisher()); storageProfile.append("\n\t\t\tOffer: ").append(resource.storageProfile().imageReference().offer()); storageProfile.append("\n\t\t\tSKU: ").append(resource.storageProfile().imageReference().sku()); storageProfile.append("\n\t\t\tVersion: ").append(resource.storageProfile().imageReference().version()); } if (resource.storageProfile().osDisk() != null) { storageProfile.append("\n\t\tOSDisk:"); storageProfile.append("\n\t\t\tOSType: ").append(resource.storageProfile().osDisk().osType()); storageProfile.append("\n\t\t\tName: ").append(resource.storageProfile().osDisk().name()); storageProfile.append("\n\t\t\tCaching: 
").append(resource.storageProfile().osDisk().caching()); storageProfile.append("\n\t\t\tCreateOption: ").append(resource.storageProfile().osDisk().createOption()); storageProfile.append("\n\t\t\tDiskSizeGB: ").append(resource.storageProfile().osDisk().diskSizeGB()); if (resource.storageProfile().osDisk().managedDisk() != null) { if (resource.storageProfile().osDisk().managedDisk().diskEncryptionSet() != null) { storageProfile.append("\n\t\t\tDiskEncryptionSet Id: ") .append(resource.storageProfile().osDisk().managedDisk().diskEncryptionSet().id()); } } if (resource.storageProfile().osDisk().image() != null) { storageProfile.append("\n\t\t\tImage Uri: ").append(resource.storageProfile().osDisk().image().uri()); } if (resource.storageProfile().osDisk().vhd() != null) { storageProfile.append("\n\t\t\tVhd Uri: ").append(resource.storageProfile().osDisk().vhd().uri()); } if (resource.storageProfile().osDisk().encryptionSettings() != null) { storageProfile.append("\n\t\t\tEncryptionSettings: "); storageProfile.append("\n\t\t\t\tEnabled: ").append(resource.storageProfile().osDisk().encryptionSettings().enabled()); storageProfile.append("\n\t\t\t\tDiskEncryptionKey Uri: ").append(resource .storageProfile() .osDisk() .encryptionSettings() .diskEncryptionKey().secretUrl()); storageProfile.append("\n\t\t\t\tKeyEncryptionKey Uri: ").append(resource .storageProfile() .osDisk() .encryptionSettings() .keyEncryptionKey().keyUrl()); } } if (resource.storageProfile().dataDisks() != null) { int i = 0; for (DataDisk disk : resource.storageProfile().dataDisks()) { storageProfile.append("\n\t\tDataDisk: storageProfile.append("\n\t\t\tName: ").append(disk.name()); storageProfile.append("\n\t\t\tCaching: ").append(disk.caching()); storageProfile.append("\n\t\t\tCreateOption: ").append(disk.createOption()); storageProfile.append("\n\t\t\tDiskSizeGB: ").append(disk.diskSizeGB()); storageProfile.append("\n\t\t\tLun: ").append(disk.lun()); if (resource.isManagedDiskEnabled()) { if 
(disk.managedDisk() != null) { storageProfile.append("\n\t\t\tManaged Disk Id: ").append(disk.managedDisk().id()); if (disk.managedDisk().diskEncryptionSet() != null) { storageProfile.append("\n\t\t\tDiskEncryptionSet Id: ").append(disk.managedDisk().diskEncryptionSet().id()); } } } else { if (disk.vhd().uri() != null) { storageProfile.append("\n\t\t\tVhd Uri: ").append(disk.vhd().uri()); } } if (disk.image() != null) { storageProfile.append("\n\t\t\tImage Uri: ").append(disk.image().uri()); } } } StringBuilder osProfile = new StringBuilder().append("\n\tOSProfile: "); if (resource.osProfile() != null) { osProfile.append("\n\t\tComputerName:").append(resource.osProfile().computerName()); if (resource.osProfile().windowsConfiguration() != null) { osProfile.append("\n\t\t\tWindowsConfiguration: "); osProfile.append("\n\t\t\t\tProvisionVMAgent: ") .append(resource.osProfile().windowsConfiguration().provisionVMAgent()); osProfile.append("\n\t\t\t\tEnableAutomaticUpdates: ") .append(resource.osProfile().windowsConfiguration().enableAutomaticUpdates()); osProfile.append("\n\t\t\t\tTimeZone: ") .append(resource.osProfile().windowsConfiguration().timeZone()); } if (resource.osProfile().linuxConfiguration() != null) { osProfile.append("\n\t\t\tLinuxConfiguration: "); osProfile.append("\n\t\t\t\tDisablePasswordAuthentication: ") .append(resource.osProfile().linuxConfiguration().disablePasswordAuthentication()); } } else { osProfile.append("null"); } StringBuilder networkProfile = new StringBuilder().append("\n\tNetworkProfile: "); for (String networkInterfaceId : resource.networkInterfaceIds()) { networkProfile.append("\n\t\tId:").append(networkInterfaceId); } StringBuilder extensions = new StringBuilder().append("\n\tExtensions: "); for (Map.Entry<String, VirtualMachineExtension> extensionEntry : resource.listExtensions().entrySet()) { VirtualMachineExtension extension = extensionEntry.getValue(); extensions.append("\n\t\tExtension: ").append(extension.id()) 
.append("\n\t\t\tName: ").append(extension.name()) .append("\n\t\t\tTags: ").append(extension.tags()) .append("\n\t\t\tProvisioningState: ").append(extension.provisioningState()) .append("\n\t\t\tAuto upgrade minor version enabled: ").append(extension.autoUpgradeMinorVersionEnabled()) .append("\n\t\t\tPublisher: ").append(extension.publisherName()) .append("\n\t\t\tType: ").append(extension.typeName()) .append("\n\t\t\tVersion: ").append(extension.versionName()) .append("\n\t\t\tPublic Settings: ").append(extension.publicSettingsAsJsonString()); } StringBuilder msi = new StringBuilder().append("\n\tMSI: "); msi.append("\n\t\t\tMSI enabled:").append(resource.isManagedServiceIdentityEnabled()); msi.append("\n\t\t\tSystem Assigned MSI Active Directory Service Principal Id:").append(resource.systemAssignedManagedServiceIdentityPrincipalId()); msi.append("\n\t\t\tSystem Assigned MSI Active Directory Tenant Id:").append(resource.systemAssignedManagedServiceIdentityTenantId()); StringBuilder zones = new StringBuilder().append("\n\tZones: "); zones.append(resource.availabilityZones()); StringBuilder securityProfile = new StringBuilder().append("\n\tSecurityProfile: "); securityProfile.append("\n\t\t\tSecurity type: ").append(resource.securityType()); securityProfile.append("\n\t\t\tSecure Boot enabled: ").append(resource.isSecureBootEnabled()); securityProfile.append("\n\t\t\tvTPM enabled: ").append(resource.isVTpmEnabled()); System.out.println(new StringBuilder().append("Virtual Machine: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tHardwareProfile: ") .append("\n\t\tSize: ").append(resource.size()) .append(storageProfile) .append(osProfile) .append(networkProfile) .append(extensions) .append(msi) .append(zones) .append(securityProfile) .toString()); } /** * Print availability 
set info. * * @param resource an availability set */ public static void print(AvailabilitySet resource) { System.out.println(new StringBuilder().append("Availability Set: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tFault domain count: ").append(resource.faultDomainCount()) .append("\n\tUpdate domain count: ").append(resource.updateDomainCount()) .toString()); } /** * Print network info. * * @param resource a network * @throws ManagementException Cloud errors */ public static void print(Network resource) { StringBuilder info = new StringBuilder(); info.append("Network: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tAddress spaces: ").append(resource.addressSpaces()) .append("\n\tDNS server IPs: ").append(resource.dnsServerIPs()); for (Subnet subnet : resource.subnets().values()) { info.append("\n\tSubnet: ").append(subnet.name()) .append("\n\t\tAddress prefix: ").append(subnet.addressPrefix()); NetworkSecurityGroup subnetNsg = subnet.getNetworkSecurityGroup(); if (subnetNsg != null) { info.append("\n\t\tNetwork security group ID: ").append(subnetNsg.id()); } RouteTable routeTable = subnet.getRouteTable(); if (routeTable != null) { info.append("\n\tRoute table ID: ").append(routeTable.id()); } Map<ServiceEndpointType, List<Region>> services = subnet.servicesWithAccess(); if (services.size() > 0) { info.append("\n\tServices with access"); for (Map.Entry<ServiceEndpointType, List<Region>> service : services.entrySet()) { info.append("\n\t\tService: ") .append(service.getKey()) .append(" Regions: " + service.getValue() + ""); } } } for (NetworkPeering peering : 
resource.peerings().list()) { info.append("\n\tPeering: ").append(peering.name()) .append("\n\t\tRemote network ID: ").append(peering.remoteNetworkId()) .append("\n\t\tPeering state: ").append(peering.state()) .append("\n\t\tIs traffic forwarded from remote network allowed? ").append(peering.isTrafficForwardingFromRemoteNetworkAllowed()) .append("\n\t\tGateway use: ").append(peering.gatewayUse()); } System.out.println(info.toString()); } /** * Print network interface. * * @param resource a network interface */ public static void print(NetworkInterface resource) { StringBuilder info = new StringBuilder(); info.append("NetworkInterface: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tInternal DNS name label: ").append(resource.internalDnsNameLabel()) .append("\n\tInternal FQDN: ").append(resource.internalFqdn()) .append("\n\tInternal domain name suffix: ").append(resource.internalDomainNameSuffix()) .append("\n\tNetwork security group: ").append(resource.networkSecurityGroupId()) .append("\n\tApplied DNS servers: ").append(resource.appliedDnsServers().toString()) .append("\n\tDNS server IPs: "); for (String dnsServerIp : resource.dnsServers()) { info.append("\n\t\t").append(dnsServerIp); } info.append("\n\tIP forwarding enabled? ").append(resource.isIPForwardingEnabled()) .append("\n\tAccelerated networking enabled? 
").append(resource.isAcceleratedNetworkingEnabled()) .append("\n\tMAC Address:").append(resource.macAddress()) .append("\n\tPrivate IP:").append(resource.primaryPrivateIP()) .append("\n\tPrivate allocation method:").append(resource.primaryPrivateIpAllocationMethod()) .append("\n\tPrimary virtual network ID: ").append(resource.primaryIPConfiguration().networkId()) .append("\n\tPrimary subnet name:").append(resource.primaryIPConfiguration().subnetName()); System.out.println(info.toString()); } /** * Print network security group. * * @param resource a network security group */ public static void print(NetworkSecurityGroup resource) { StringBuilder info = new StringBuilder(); info.append("NSG: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()); for (NetworkSecurityRule rule : resource.securityRules().values()) { info.append("\n\tRule: ").append(rule.name()) .append("\n\t\tAccess: ").append(rule.access()) .append("\n\t\tDirection: ").append(rule.direction()) .append("\n\t\tFrom address: ").append(rule.sourceAddressPrefix()) .append("\n\t\tFrom port range: ").append(rule.sourcePortRange()) .append("\n\t\tTo address: ").append(rule.destinationAddressPrefix()) .append("\n\t\tTo port: ").append(rule.destinationPortRange()) .append("\n\t\tProtocol: ").append(rule.protocol()) .append("\n\t\tPriority: ").append(rule.priority()); } System.out.println(info.toString()); } /** * Print public IP address. 
* * @param resource a public IP address */ public static void print(PublicIpAddress resource) { System.out.println(new StringBuilder().append("Public IP Address: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tIP Address: ").append(resource.ipAddress()) .append("\n\tLeaf domain label: ").append(resource.leafDomainLabel()) .append("\n\tFQDN: ").append(resource.fqdn()) .append("\n\tReverse FQDN: ").append(resource.reverseFqdn()) .append("\n\tIdle timeout (minutes): ").append(resource.idleTimeoutInMinutes()) .append("\n\tIP allocation method: ").append(resource.ipAllocationMethod()) .append("\n\tZones: ").append(resource.availabilityZones()) .toString()); } /** * Print a key vault. * * @param vault the key vault resource */ public static void print(Vault vault) { StringBuilder info = new StringBuilder().append("Key Vault: ").append(vault.id()) .append("Name: ").append(vault.name()) .append("\n\tResource group: ").append(vault.resourceGroupName()) .append("\n\tRegion: ").append(vault.region()) .append("\n\tSku: ").append(vault.sku().name()).append(" - ").append(vault.sku().family()) .append("\n\tVault URI: ").append(vault.vaultUri()) .append("\n\tAccess policies: "); for (AccessPolicy accessPolicy : vault.accessPolicies()) { info.append("\n\t\tIdentity:").append(accessPolicy.objectId()); if (accessPolicy.permissions() != null) { if (accessPolicy.permissions().keys() != null) { info.append("\n\t\tKey permissions: ").append(accessPolicy.permissions().keys().stream().map(KeyPermissions::toString).collect(Collectors.joining(", "))); } if (accessPolicy.permissions().secrets() != null) { info.append("\n\t\tSecret permissions: ").append(accessPolicy.permissions().secrets().stream().map(SecretPermissions::toString).collect(Collectors.joining(", "))); } if 
(accessPolicy.permissions().certificates() != null) { info.append("\n\t\tCertificate permissions: ").append(accessPolicy.permissions().certificates().stream().map(CertificatePermissions::toString).collect(Collectors.joining(", "))); } } } System.out.println(info.toString()); } /** * Print storage account. * * @param storageAccount a storage account */ public static void print(StorageAccount storageAccount) { System.out.println(storageAccount.name() + " created @ " + storageAccount.creationTime()); StringBuilder info = new StringBuilder().append("Storage Account: ").append(storageAccount.id()) .append("Name: ").append(storageAccount.name()) .append("\n\tResource group: ").append(storageAccount.resourceGroupName()) .append("\n\tRegion: ").append(storageAccount.region()) .append("\n\tSKU: ").append(storageAccount.skuType().name().toString()) .append("\n\tAccessTier: ").append(storageAccount.accessTier()) .append("\n\tKind: ").append(storageAccount.kind()); info.append("\n\tNetwork Rule Configuration: ") .append("\n\t\tAllow reading logs from any network: ").append(storageAccount.canReadLogEntriesFromAnyNetwork()) .append("\n\t\tAllow reading metrics from any network: ").append(storageAccount.canReadMetricsFromAnyNetwork()) .append("\n\t\tAllow access from all azure services: ").append(storageAccount.canAccessFromAzureServices()); if (storageAccount.networkSubnetsWithAccess().size() > 0) { info.append("\n\t\tNetwork subnets with access: "); for (String subnetId : storageAccount.networkSubnetsWithAccess()) { info.append("\n\t\t\t").append(subnetId); } } if (storageAccount.ipAddressesWithAccess().size() > 0) { info.append("\n\t\tIP addresses with access: "); for (String ipAddress : storageAccount.ipAddressesWithAccess()) { info.append("\n\t\t\t").append(ipAddress); } } if (storageAccount.ipAddressRangesWithAccess().size() > 0) { info.append("\n\t\tIP address-ranges with access: "); for (String ipAddressRange : storageAccount.ipAddressRangesWithAccess()) { 
info.append("\n\t\t\t").append(ipAddressRange); } } info.append("\n\t\tTraffic allowed from only HTTPS: ").append(storageAccount.innerModel().enableHttpsTrafficOnly()); info.append("\n\tEncryption status: "); info.append("\n\t\tInfrastructure Encryption: ").append(storageAccount.infrastructureEncryptionEnabled() ? "Enabled" : "Disabled"); for (Map.Entry<StorageService, StorageAccountEncryptionStatus> eStatus : storageAccount.encryptionStatuses().entrySet()) { info.append("\n\t\t").append(eStatus.getValue().storageService()).append(": ").append(eStatus.getValue().isEnabled() ? "Enabled" : "Disabled"); } System.out.println(info.toString()); } /** * Print storage account keys. * * @param storageAccountKeys a list of storage account keys */ public static void print(List<StorageAccountKey> storageAccountKeys) { for (int i = 0; i < storageAccountKeys.size(); i++) { StorageAccountKey storageAccountKey = storageAccountKeys.get(i); System.out.println("Key (" + i + ") " + storageAccountKey.keyName() + "=" + storageAccountKey.value()); } } /** * Print Redis Cache. * * @param redisCache a Redis cache. 
*/ public static void print(RedisCache redisCache) { StringBuilder redisInfo = new StringBuilder() .append("Redis Cache Name: ").append(redisCache.name()) .append("\n\tResource group: ").append(redisCache.resourceGroupName()) .append("\n\tRegion: ").append(redisCache.region()) .append("\n\tSKU Name: ").append(redisCache.sku().name()) .append("\n\tSKU Family: ").append(redisCache.sku().family()) .append("\n\tHostname: ").append(redisCache.hostname()) .append("\n\tSSL port: ").append(redisCache.sslPort()) .append("\n\tNon-SSL port (6379) enabled: ").append(redisCache.nonSslPort()); if (redisCache.redisConfiguration() != null && !redisCache.redisConfiguration().isEmpty()) { redisInfo.append("\n\tRedis Configuration:"); for (Map.Entry<String, String> redisConfiguration : redisCache.redisConfiguration().entrySet()) { redisInfo.append("\n\t '").append(redisConfiguration.getKey()) .append("' : '").append(redisConfiguration.getValue()).append("'"); } } if (redisCache.isPremium()) { RedisCachePremium premium = redisCache.asPremium(); List<ScheduleEntry> scheduleEntries = premium.listPatchSchedules(); if (scheduleEntries != null && !scheduleEntries.isEmpty()) { redisInfo.append("\n\tRedis Patch Schedule:"); for (ScheduleEntry schedule : scheduleEntries) { redisInfo.append("\n\t\tDay: '").append(schedule.dayOfWeek()) .append("', start at: '").append(schedule.startHourUtc()) .append("', maintenance window: '").append(schedule.maintenanceWindow()) .append("'"); } } } System.out.println(redisInfo.toString()); } /** * Print Redis Cache access keys. * * @param redisAccessKeys a keys for Redis Cache */ public static void print(RedisAccessKeys redisAccessKeys) { StringBuilder redisKeys = new StringBuilder() .append("Redis Access Keys: ") .append("\n\tPrimary Key: '").append(redisAccessKeys.primaryKey()).append("', ") .append("\n\tSecondary Key: '").append(redisAccessKeys.secondaryKey()).append("', "); System.out.println(redisKeys.toString()); } /** * Print management lock. 
* * @param lock a management lock */ public static void print(ManagementLock lock) { StringBuilder info = new StringBuilder(); info.append("\nLock ID: ").append(lock.id()) .append("\nLocked resource ID: ").append(lock.lockedResourceId()) .append("\nLevel: ").append(lock.level()); System.out.println(info.toString()); } /** * Print load balancer. * * @param resource a load balancer */ public static void print(LoadBalancer resource) { StringBuilder info = new StringBuilder(); info.append("Load balancer: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tTags: ").append(resource.tags()) .append("\n\tBackends: ").append(resource.backends().keySet().toString()); info.append("\n\tPublic IP address IDs: ") .append(resource.publicIpAddressIds().size()); for (String pipId : resource.publicIpAddressIds()) { info.append("\n\t\tPIP id: ").append(pipId); } info.append("\n\tTCP probes: ") .append(resource.tcpProbes().size()); for (LoadBalancerTcpProbe probe : resource.tcpProbes().values()) { info.append("\n\t\tProbe name: ").append(probe.name()) .append("\n\t\t\tPort: ").append(probe.port()) .append("\n\t\t\tInterval in seconds: ").append(probe.intervalInSeconds()) .append("\n\t\t\tRetries before unhealthy: ").append(probe.numberOfProbes()); info.append("\n\t\t\tReferenced from load balancing rules: ") .append(probe.loadBalancingRules().size()); for (LoadBalancingRule rule : probe.loadBalancingRules().values()) { info.append("\n\t\t\t\tName: ").append(rule.name()); } } info.append("\n\tHTTP probes: ") .append(resource.httpProbes().size()); for (LoadBalancerHttpProbe probe : resource.httpProbes().values()) { info.append("\n\t\tProbe name: ").append(probe.name()) .append("\n\t\t\tPort: ").append(probe.port()) .append("\n\t\t\tInterval in seconds: ").append(probe.intervalInSeconds()) .append("\n\t\t\tRetries before unhealthy: 
").append(probe.numberOfProbes()) .append("\n\t\t\tHTTP request path: ").append(probe.requestPath()); info.append("\n\t\t\tReferenced from load balancing rules: ") .append(probe.loadBalancingRules().size()); for (LoadBalancingRule rule : probe.loadBalancingRules().values()) { info.append("\n\t\t\t\tName: ").append(rule.name()); } } info.append("\n\tHTTPS probes: ") .append(resource.httpsProbes().size()); for (LoadBalancerHttpProbe probe : resource.httpsProbes().values()) { info.append("\n\t\tProbe name: ").append(probe.name()) .append("\n\t\t\tPort: ").append(probe.port()) .append("\n\t\t\tInterval in seconds: ").append(probe.intervalInSeconds()) .append("\n\t\t\tRetries before unhealthy: ").append(probe.numberOfProbes()) .append("\n\t\t\tHTTPS request path: ").append(probe.requestPath()); info.append("\n\t\t\tReferenced from load balancing rules: ") .append(probe.loadBalancingRules().size()); for (LoadBalancingRule rule : probe.loadBalancingRules().values()) { info.append("\n\t\t\t\tName: ").append(rule.name()); } } info.append("\n\tLoad balancing rules: ") .append(resource.loadBalancingRules().size()); for (LoadBalancingRule rule : resource.loadBalancingRules().values()) { info.append("\n\t\tLB rule name: ").append(rule.name()) .append("\n\t\t\tProtocol: ").append(rule.protocol()) .append("\n\t\t\tFloating IP enabled? 
").append(rule.floatingIPEnabled()) .append("\n\t\t\tIdle timeout in minutes: ").append(rule.idleTimeoutInMinutes()) .append("\n\t\t\tLoad distribution method: ").append(rule.loadDistribution().toString()); LoadBalancerFrontend frontend = rule.frontend(); info.append("\n\t\t\tFrontend: "); if (frontend != null) { info.append(frontend.name()); } else { info.append("(None)"); } info.append("\n\t\t\tFrontend port: ").append(rule.frontendPort()); LoadBalancerBackend backend = rule.backend(); info.append("\n\t\t\tBackend: "); if (backend != null) { info.append(backend.name()); } else { info.append("(None)"); } info.append("\n\t\t\tBackend port: ").append(rule.backendPort()); LoadBalancerProbe probe = rule.probe(); info.append("\n\t\t\tProbe: "); if (probe == null) { info.append("(None)"); } else { info.append(probe.name()).append(" [").append(probe.protocol().toString()).append("]"); } } info.append("\n\tFrontends: ") .append(resource.frontends().size()); for (LoadBalancerFrontend frontend : resource.frontends().values()) { info.append("\n\t\tFrontend name: ").append(frontend.name()) .append("\n\t\t\tInternet facing: ").append(frontend.isPublic()); if (frontend.isPublic()) { info.append("\n\t\t\tPublic IP Address ID: ").append(((LoadBalancerPublicFrontend) frontend).publicIpAddressId()); } else { info.append("\n\t\t\tVirtual network ID: ").append(((LoadBalancerPrivateFrontend) frontend).networkId()) .append("\n\t\t\tSubnet name: ").append(((LoadBalancerPrivateFrontend) frontend).subnetName()) .append("\n\t\t\tPrivate IP address: ").append(((LoadBalancerPrivateFrontend) frontend).privateIpAddress()) .append("\n\t\t\tPrivate IP allocation method: ").append(((LoadBalancerPrivateFrontend) frontend).privateIpAllocationMethod()); } info.append("\n\t\t\tReferenced inbound NAT pools: ") .append(frontend.inboundNatPools().size()); for (LoadBalancerInboundNatPool pool : frontend.inboundNatPools().values()) { info.append("\n\t\t\t\tName: ").append(pool.name()); } 
info.append("\n\t\t\tReferenced inbound NAT rules: ") .append(frontend.inboundNatRules().size()); for (LoadBalancerInboundNatRule rule : frontend.inboundNatRules().values()) { info.append("\n\t\t\t\tName: ").append(rule.name()); } info.append("\n\t\t\tReferenced load balancing rules: ") .append(frontend.loadBalancingRules().size()); for (LoadBalancingRule rule : frontend.loadBalancingRules().values()) { info.append("\n\t\t\t\tName: ").append(rule.name()); } } info.append("\n\tInbound NAT rules: ") .append(resource.inboundNatRules().size()); for (LoadBalancerInboundNatRule natRule : resource.inboundNatRules().values()) { info.append("\n\t\tInbound NAT rule name: ").append(natRule.name()) .append("\n\t\t\tProtocol: ").append(natRule.protocol().toString()) .append("\n\t\t\tFrontend: ").append(natRule.frontend().name()) .append("\n\t\t\tFrontend port: ").append(natRule.frontendPort()) .append("\n\t\t\tBackend port: ").append(natRule.backendPort()) .append("\n\t\t\tBackend NIC ID: ").append(natRule.backendNetworkInterfaceId()) .append("\n\t\t\tBackend NIC IP config name: ").append(natRule.backendNicIpConfigurationName()) .append("\n\t\t\tFloating IP? 
").append(natRule.floatingIPEnabled()) .append("\n\t\t\tIdle timeout in minutes: ").append(natRule.idleTimeoutInMinutes()); } info.append("\n\tInbound NAT pools: ") .append(resource.inboundNatPools().size()); for (LoadBalancerInboundNatPool natPool : resource.inboundNatPools().values()) { info.append("\n\t\tInbound NAT pool name: ").append(natPool.name()) .append("\n\t\t\tProtocol: ").append(natPool.protocol().toString()) .append("\n\t\t\tFrontend: ").append(natPool.frontend().name()) .append("\n\t\t\tFrontend port range: ") .append(natPool.frontendPortRangeStart()) .append("-") .append(natPool.frontendPortRangeEnd()) .append("\n\t\t\tBackend port: ").append(natPool.backendPort()); } info.append("\n\tBackends: ") .append(resource.backends().size()); for (LoadBalancerBackend backend : resource.backends().values()) { info.append("\n\t\tBackend name: ").append(backend.name()); info.append("\n\t\t\tReferenced NICs: ") .append(backend.backendNicIPConfigurationNames().entrySet().size()); for (Map.Entry<String, String> entry : backend.backendNicIPConfigurationNames().entrySet()) { info.append("\n\t\t\t\tNIC ID: ").append(entry.getKey()) .append(" - IP Config: ").append(entry.getValue()); } Set<String> vmIds = backend.getVirtualMachineIds(); info.append("\n\t\t\tReferenced virtual machine ids: ") .append(vmIds.size()); for (String vmId : vmIds) { info.append("\n\t\t\t\tVM ID: ").append(vmId); } info.append("\n\t\t\tReferenced load balancing rules: ") .append(new ArrayList<String>(backend.loadBalancingRules().keySet())); } System.out.println(info.toString()); } /** * Print app service domain. 
* * @param resource an app service domain */ public static void print(AppServiceDomain resource) { StringBuilder builder = new StringBuilder().append("Domain: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tCreated time: ").append(resource.createdTime()) .append("\n\tExpiration time: ").append(resource.expirationTime()) .append("\n\tContact: "); Contact contact = resource.registrantContact(); if (contact == null) { builder = builder.append("Private"); } else { builder = builder.append("\n\t\tName: ").append(contact.nameFirst() + " " + contact.nameLast()); } builder = builder.append("\n\tName servers: "); for (String nameServer : resource.nameServers()) { builder = builder.append("\n\t\t" + nameServer); } System.out.println(builder.toString()); } /** * Print app service certificate order. * * @param resource an app service certificate order */ public static void print(AppServiceCertificateOrder resource) { StringBuilder builder = new StringBuilder().append("App service certificate order: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tDistinguished name: ").append(resource.distinguishedName()) .append("\n\tProduct type: ").append(resource.productType()) .append("\n\tValid years: ").append(resource.validityInYears()) .append("\n\tStatus: ").append(resource.status()) .append("\n\tIssuance time: ").append(resource.lastCertificateIssuanceTime()) .append("\n\tSigned certificate: ").append(resource.signedCertificate() == null ? null : resource.signedCertificate().thumbprint()); System.out.println(builder.toString()); } /** * Print app service plan. 
* * @param resource an app service plan */ public static void print(AppServicePlan resource) { StringBuilder builder = new StringBuilder().append("App service certificate order: ").append(resource.id()) .append("Name: ").append(resource.name()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tPricing tier: ").append(resource.pricingTier()); System.out.println(builder.toString()); } /** * Print a web app. * * @param resource a web app */ public static void print(WebAppBase resource) { StringBuilder builder = new StringBuilder().append("Web app: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tState: ").append(resource.state()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tDefault hostname: ").append(resource.defaultHostname()) .append("\n\tApp service plan: ").append(resource.appServicePlanId()) .append("\n\tHost name bindings: "); for (HostnameBinding binding : resource.getHostnameBindings().values()) { builder = builder.append("\n\t\t" + binding.toString()); } builder = builder.append("\n\tSSL bindings: "); for (HostnameSslState binding : resource.hostnameSslStates().values()) { builder = builder.append("\n\t\t" + binding.name() + ": " + binding.sslState()); if (binding.sslState() != null && binding.sslState() != SslState.DISABLED) { builder = builder.append(" - " + binding.thumbprint()); } } builder = builder.append("\n\tApp settings: "); for (AppSetting setting : resource.getAppSettings().values()) { builder = builder.append("\n\t\t" + setting.key() + ": " + setting.value() + (setting.sticky() ? " - slot setting" : "")); } builder = builder.append("\n\tConnection strings: "); for (ConnectionString conn : resource.getConnectionStrings().values()) { builder = builder.append("\n\t\t" + conn.name() + ": " + conn.value() + " - " + conn.type() + (conn.sticky() ? 
" - slot setting" : "")); } System.out.println(builder.toString()); } /** * Print a web site. * * @param resource a web site */ public static void print(WebSiteBase resource) { StringBuilder builder = new StringBuilder().append("Web app: ").append(resource.id()) .append("\n\tName: ").append(resource.name()) .append("\n\tState: ").append(resource.state()) .append("\n\tResource group: ").append(resource.resourceGroupName()) .append("\n\tRegion: ").append(resource.region()) .append("\n\tDefault hostname: ").append(resource.defaultHostname()) .append("\n\tApp service plan: ").append(resource.appServicePlanId()); builder = builder.append("\n\tSSL bindings: "); for (HostnameSslState binding : resource.hostnameSslStates().values()) { builder = builder.append("\n\t\t" + binding.name() + ": " + binding.sslState()); if (binding.sslState() != null && binding.sslState() != SslState.DISABLED) { builder = builder.append(" - " + binding.thumbprint()); } } System.out.println(builder.toString()); } /** * Print a traffic manager profile. 
* * @param profile a traffic manager profile */ public static void print(TrafficManagerProfile profile) { StringBuilder info = new StringBuilder(); info.append("Traffic Manager Profile: ").append(profile.id()) .append("\n\tName: ").append(profile.name()) .append("\n\tResource group: ").append(profile.resourceGroupName()) .append("\n\tRegion: ").append(profile.regionName()) .append("\n\tTags: ").append(profile.tags()) .append("\n\tDNSLabel: ").append(profile.dnsLabel()) .append("\n\tFQDN: ").append(profile.fqdn()) .append("\n\tTTL: ").append(profile.timeToLive()) .append("\n\tEnabled: ").append(profile.isEnabled()) .append("\n\tRoutingMethod: ").append(profile.trafficRoutingMethod()) .append("\n\tMonitor status: ").append(profile.monitorStatus()) .append("\n\tMonitoring port: ").append(profile.monitoringPort()) .append("\n\tMonitoring path: ").append(profile.monitoringPath()); Map<String, TrafficManagerAzureEndpoint> azureEndpoints = profile.azureEndpoints(); if (!azureEndpoints.isEmpty()) { info.append("\n\tAzure endpoints:"); int idx = 1; for (TrafficManagerAzureEndpoint endpoint : azureEndpoints.values()) { info.append("\n\t\tAzure endpoint: .append("\n\t\t\tId: ").append(endpoint.id()) .append("\n\t\t\tType: ").append(endpoint.endpointType()) .append("\n\t\t\tTarget resourceId: ").append(endpoint.targetAzureResourceId()) .append("\n\t\t\tTarget resourceType: ").append(endpoint.targetResourceType()) .append("\n\t\t\tMonitor status: ").append(endpoint.monitorStatus()) .append("\n\t\t\tEnabled: ").append(endpoint.isEnabled()) .append("\n\t\t\tRouting priority: ").append(endpoint.routingPriority()) .append("\n\t\t\tRouting weight: ").append(endpoint.routingWeight()); } } Map<String, TrafficManagerExternalEndpoint> externalEndpoints = profile.externalEndpoints(); if (!externalEndpoints.isEmpty()) { info.append("\n\tExternal endpoints:"); int idx = 1; for (TrafficManagerExternalEndpoint endpoint : externalEndpoints.values()) { info.append("\n\t\tExternal endpoint: 
.append("\n\t\t\tId: ").append(endpoint.id()) .append("\n\t\t\tType: ").append(endpoint.endpointType()) .append("\n\t\t\tFQDN: ").append(endpoint.fqdn()) .append("\n\t\t\tSource Traffic Location: ").append(endpoint.sourceTrafficLocation()) .append("\n\t\t\tMonitor status: ").append(endpoint.monitorStatus()) .append("\n\t\t\tEnabled: ").append(endpoint.isEnabled()) .append("\n\t\t\tRouting priority: ").append(endpoint.routingPriority()) .append("\n\t\t\tRouting weight: ").append(endpoint.routingWeight()); } } Map<String, TrafficManagerNestedProfileEndpoint> nestedProfileEndpoints = profile.nestedProfileEndpoints(); if (!nestedProfileEndpoints.isEmpty()) { info.append("\n\tNested profile endpoints:"); int idx = 1; for (TrafficManagerNestedProfileEndpoint endpoint : nestedProfileEndpoints.values()) { info.append("\n\t\tNested profile endpoint: .append("\n\t\t\tId: ").append(endpoint.id()) .append("\n\t\t\tType: ").append(endpoint.endpointType()) .append("\n\t\t\tNested profileId: ").append(endpoint.nestedProfileId()) .append("\n\t\t\tMinimum child threshold: ").append(endpoint.minimumChildEndpointCount()) .append("\n\t\t\tSource Traffic Location: ").append(endpoint.sourceTrafficLocation()) .append("\n\t\t\tMonitor status: ").append(endpoint.monitorStatus()) .append("\n\t\t\tEnabled: ").append(endpoint.isEnabled()) .append("\n\t\t\tRouting priority: ").append(endpoint.routingPriority()) .append("\n\t\t\tRouting weight: ").append(endpoint.routingWeight()); } } System.out.println(info.toString()); } /** * Print a dns zone. 
* * @param dnsZone a dns zone */ public static void print(DnsZone dnsZone) { StringBuilder info = new StringBuilder(); info.append("DNS Zone: ").append(dnsZone.id()) .append("\n\tName (Top level domain): ").append(dnsZone.name()) .append("\n\tResource group: ").append(dnsZone.resourceGroupName()) .append("\n\tRegion: ").append(dnsZone.regionName()) .append("\n\tTags: ").append(dnsZone.tags()) .append("\n\tName servers:"); for (String nameServer : dnsZone.nameServers()) { info.append("\n\t\t").append(nameServer); } SoaRecordSet soaRecordSet = dnsZone.getSoaRecordSet(); SoaRecord soaRecord = soaRecordSet.record(); info.append("\n\tSOA Record:") .append("\n\t\tHost:").append(soaRecord.host()) .append("\n\t\tEmail:").append(soaRecord.email()) .append("\n\t\tExpire time (seconds):").append(soaRecord.expireTime()) .append("\n\t\tRefresh time (seconds):").append(soaRecord.refreshTime()) .append("\n\t\tRetry time (seconds):").append(soaRecord.retryTime()) .append("\n\t\tNegative response cache ttl (seconds):").append(soaRecord.minimumTtl()) .append("\n\t\tTTL (seconds):").append(soaRecordSet.timeToLive()); PagedIterable<ARecordSet> aRecordSets = dnsZone.aRecordSets().list(); info.append("\n\tA Record sets:"); for (ARecordSet aRecordSet : aRecordSets) { info.append("\n\t\tId: ").append(aRecordSet.id()) .append("\n\t\tName: ").append(aRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(aRecordSet.timeToLive()) .append("\n\t\tIP v4 addresses: "); for (String ipAddress : aRecordSet.ipv4Addresses()) { info.append("\n\t\t\t").append(ipAddress); } } PagedIterable<AaaaRecordSet> aaaaRecordSets = dnsZone.aaaaRecordSets().list(); info.append("\n\tAAAA Record sets:"); for (AaaaRecordSet aaaaRecordSet : aaaaRecordSets) { info.append("\n\t\tId: ").append(aaaaRecordSet.id()) .append("\n\t\tName: ").append(aaaaRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(aaaaRecordSet.timeToLive()) .append("\n\t\tIP v6 addresses: "); for (String ipAddress : 
aaaaRecordSet.ipv6Addresses()) { info.append("\n\t\t\t").append(ipAddress); } } PagedIterable<CnameRecordSet> cnameRecordSets = dnsZone.cNameRecordSets().list(); info.append("\n\tCNAME Record sets:"); for (CnameRecordSet cnameRecordSet : cnameRecordSets) { info.append("\n\t\tId: ").append(cnameRecordSet.id()) .append("\n\t\tName: ").append(cnameRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(cnameRecordSet.timeToLive()) .append("\n\t\tCanonical name: ").append(cnameRecordSet.canonicalName()); } PagedIterable<MxRecordSet> mxRecordSets = dnsZone.mxRecordSets().list(); info.append("\n\tMX Record sets:"); for (MxRecordSet mxRecordSet : mxRecordSets) { info.append("\n\t\tId: ").append(mxRecordSet.id()) .append("\n\t\tName: ").append(mxRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(mxRecordSet.timeToLive()) .append("\n\t\tRecords: "); for (MxRecord mxRecord : mxRecordSet.records()) { info.append("\n\t\t\tExchange server, Preference: ") .append(mxRecord.exchange()) .append(" ") .append(mxRecord.preference()); } } PagedIterable<NsRecordSet> nsRecordSets = dnsZone.nsRecordSets().list(); info.append("\n\tNS Record sets:"); for (NsRecordSet nsRecordSet : nsRecordSets) { info.append("\n\t\tId: ").append(nsRecordSet.id()) .append("\n\t\tName: ").append(nsRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(nsRecordSet.timeToLive()) .append("\n\t\tName servers: "); for (String nameServer : nsRecordSet.nameServers()) { info.append("\n\t\t\t").append(nameServer); } } PagedIterable<PtrRecordSet> ptrRecordSets = dnsZone.ptrRecordSets().list(); info.append("\n\tPTR Record sets:"); for (PtrRecordSet ptrRecordSet : ptrRecordSets) { info.append("\n\t\tId: ").append(ptrRecordSet.id()) .append("\n\t\tName: ").append(ptrRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(ptrRecordSet.timeToLive()) .append("\n\t\tTarget domain names: "); for (String domainNames : ptrRecordSet.targetDomainNames()) { info.append("\n\t\t\t").append(domainNames); } } 
PagedIterable<SrvRecordSet> srvRecordSets = dnsZone.srvRecordSets().list(); info.append("\n\tSRV Record sets:"); for (SrvRecordSet srvRecordSet : srvRecordSets) { info.append("\n\t\tId: ").append(srvRecordSet.id()) .append("\n\t\tName: ").append(srvRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(srvRecordSet.timeToLive()) .append("\n\t\tRecords: "); for (SrvRecord srvRecord : srvRecordSet.records()) { info.append("\n\t\t\tTarget, Port, Priority, Weight: ") .append(srvRecord.target()) .append(", ") .append(srvRecord.port()) .append(", ") .append(srvRecord.priority()) .append(", ") .append(srvRecord.weight()); } } PagedIterable<TxtRecordSet> txtRecordSets = dnsZone.txtRecordSets().list(); info.append("\n\tTXT Record sets:"); for (TxtRecordSet txtRecordSet : txtRecordSets) { info.append("\n\t\tId: ").append(txtRecordSet.id()) .append("\n\t\tName: ").append(txtRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(txtRecordSet.timeToLive()) .append("\n\t\tRecords: "); for (TxtRecord txtRecord : txtRecordSet.records()) { if (txtRecord.value().size() > 0) { info.append("\n\t\t\tValue: ").append(txtRecord.value().get(0)); } } } System.out.println(info.toString()); } /** * Print a private dns zone. 
* * @param privateDnsZone a private dns zone */ public static void print(PrivateDnsZone privateDnsZone) { StringBuilder info = new StringBuilder(); info.append("Private DNS Zone: ").append(privateDnsZone.id()) .append("\n\tName (Top level domain): ").append(privateDnsZone.name()) .append("\n\tResource group: ").append(privateDnsZone.resourceGroupName()) .append("\n\tRegion: ").append(privateDnsZone.regionName()) .append("\n\tTags: ").append(privateDnsZone.tags()) .append("\n\tName servers:"); com.azure.resourcemanager.privatedns.models.SoaRecordSet soaRecordSet = privateDnsZone.getSoaRecordSet(); com.azure.resourcemanager.privatedns.models.SoaRecord soaRecord = soaRecordSet.record(); info.append("\n\tSOA Record:") .append("\n\t\tHost:").append(soaRecord.host()) .append("\n\t\tEmail:").append(soaRecord.email()) .append("\n\t\tExpire time (seconds):").append(soaRecord.expireTime()) .append("\n\t\tRefresh time (seconds):").append(soaRecord.refreshTime()) .append("\n\t\tRetry time (seconds):").append(soaRecord.retryTime()) .append("\n\t\tNegative response cache ttl (seconds):").append(soaRecord.minimumTtl()) .append("\n\t\tTTL (seconds):").append(soaRecordSet.timeToLive()); PagedIterable<com.azure.resourcemanager.privatedns.models.ARecordSet> aRecordSets = privateDnsZone .aRecordSets().list(); info.append("\n\tA Record sets:"); for (com.azure.resourcemanager.privatedns.models.ARecordSet aRecordSet : aRecordSets) { info.append("\n\t\tId: ").append(aRecordSet.id()) .append("\n\t\tName: ").append(aRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(aRecordSet.timeToLive()) .append("\n\t\tIP v4 addresses: "); for (String ipAddress : aRecordSet.ipv4Addresses()) { info.append("\n\t\t\t").append(ipAddress); } } PagedIterable<com.azure.resourcemanager.privatedns.models.AaaaRecordSet> aaaaRecordSets = privateDnsZone .aaaaRecordSets().list(); info.append("\n\tAAAA Record sets:"); for (com.azure.resourcemanager.privatedns.models.AaaaRecordSet aaaaRecordSet : aaaaRecordSets) 
{ info.append("\n\t\tId: ").append(aaaaRecordSet.id()) .append("\n\t\tName: ").append(aaaaRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(aaaaRecordSet.timeToLive()) .append("\n\t\tIP v6 addresses: "); for (String ipAddress : aaaaRecordSet.ipv6Addresses()) { info.append("\n\t\t\t").append(ipAddress); } } PagedIterable<com.azure.resourcemanager.privatedns.models.CnameRecordSet> cnameRecordSets = privateDnsZone.cnameRecordSets().list(); info.append("\n\tCNAME Record sets:"); for (com.azure.resourcemanager.privatedns.models.CnameRecordSet cnameRecordSet : cnameRecordSets) { info.append("\n\t\tId: ").append(cnameRecordSet.id()) .append("\n\t\tName: ").append(cnameRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(cnameRecordSet.timeToLive()) .append("\n\t\tCanonical name: ").append(cnameRecordSet.canonicalName()); } PagedIterable<com.azure.resourcemanager.privatedns.models.MxRecordSet> mxRecordSets = privateDnsZone.mxRecordSets().list(); info.append("\n\tMX Record sets:"); for (com.azure.resourcemanager.privatedns.models.MxRecordSet mxRecordSet : mxRecordSets) { info.append("\n\t\tId: ").append(mxRecordSet.id()) .append("\n\t\tName: ").append(mxRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(mxRecordSet.timeToLive()) .append("\n\t\tRecords: "); for (com.azure.resourcemanager.privatedns.models.MxRecord mxRecord : mxRecordSet.records()) { info.append("\n\t\t\tExchange server, Preference: ") .append(mxRecord.exchange()) .append(" ") .append(mxRecord.preference()); } } PagedIterable<com.azure.resourcemanager.privatedns.models.PtrRecordSet> ptrRecordSets = privateDnsZone .ptrRecordSets().list(); info.append("\n\tPTR Record sets:"); for (com.azure.resourcemanager.privatedns.models.PtrRecordSet ptrRecordSet : ptrRecordSets) { info.append("\n\t\tId: ").append(ptrRecordSet.id()) .append("\n\t\tName: ").append(ptrRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(ptrRecordSet.timeToLive()) .append("\n\t\tTarget domain names: "); for (String 
domainNames : ptrRecordSet.targetDomainNames()) { info.append("\n\t\t\t").append(domainNames); } } PagedIterable<com.azure.resourcemanager.privatedns.models.SrvRecordSet> srvRecordSets = privateDnsZone .srvRecordSets().list(); info.append("\n\tSRV Record sets:"); for (com.azure.resourcemanager.privatedns.models.SrvRecordSet srvRecordSet : srvRecordSets) { info.append("\n\t\tId: ").append(srvRecordSet.id()) .append("\n\t\tName: ").append(srvRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(srvRecordSet.timeToLive()) .append("\n\t\tRecords: "); for (com.azure.resourcemanager.privatedns.models.SrvRecord srvRecord : srvRecordSet.records()) { info.append("\n\t\t\tTarget, Port, Priority, Weight: ") .append(srvRecord.target()) .append(", ") .append(srvRecord.port()) .append(", ") .append(srvRecord.priority()) .append(", ") .append(srvRecord.weight()); } } PagedIterable<com.azure.resourcemanager.privatedns.models.TxtRecordSet> txtRecordSets = privateDnsZone .txtRecordSets().list(); info.append("\n\tTXT Record sets:"); for (com.azure.resourcemanager.privatedns.models.TxtRecordSet txtRecordSet : txtRecordSets) { info.append("\n\t\tId: ").append(txtRecordSet.id()) .append("\n\t\tName: ").append(txtRecordSet.name()) .append("\n\t\tTTL (seconds): ").append(txtRecordSet.timeToLive()) .append("\n\t\tRecords: "); for (com.azure.resourcemanager.privatedns.models.TxtRecord txtRecord : txtRecordSet.records()) { if (txtRecord.value().size() > 0) { info.append("\n\t\t\tValue: ").append(txtRecord.value().get(0)); } } } PagedIterable<VirtualNetworkLink> virtualNetworkLinks = privateDnsZone.virtualNetworkLinks().list(); info.append("\n\tVirtual Network Links:"); for (VirtualNetworkLink virtualNetworkLink : virtualNetworkLinks) { info.append("\n\tId: ").append(virtualNetworkLink.id()) .append("\n\tName: ").append(virtualNetworkLink.name()) .append("\n\tReference of Virtual Network: ").append(virtualNetworkLink.referencedVirtualNetworkId()) .append("\n\tRegistration enabled: 
").append(virtualNetworkLink.isAutoRegistrationEnabled()); } System.out.println(info.toString()); } /** * Print an Azure Container Registry. * * @param azureRegistry an Azure Container Registry */ public static void print(Registry azureRegistry) { StringBuilder info = new StringBuilder(); RegistryCredentials acrCredentials = azureRegistry.getCredentials(); info.append("Azure Container Registry: ").append(azureRegistry.id()) .append("\n\tName: ").append(azureRegistry.name()) .append("\n\tServer Url: ").append(azureRegistry.loginServerUrl()) .append("\n\tUser: ").append(acrCredentials.username()) .append("\n\tFirst Password: ").append(acrCredentials.accessKeys().get(AccessKeyType.PRIMARY)) .append("\n\tSecond Password: ").append(acrCredentials.accessKeys().get(AccessKeyType.SECONDARY)); System.out.println(info.toString()); } /** * Print an Azure Container Service (AKS). * * @param kubernetesCluster a managed container service */ public static void print(KubernetesCluster kubernetesCluster) { StringBuilder info = new StringBuilder(); info.append("Azure Container Service: ").append(kubernetesCluster.id()) .append("\n\tName: ").append(kubernetesCluster.name()) .append("\n\tFQDN: ").append(kubernetesCluster.fqdn()) .append("\n\tDNS prefix label: ").append(kubernetesCluster.dnsPrefix()) .append("\n\t\tWith Agent pool name: ").append(new ArrayList<>(kubernetesCluster.agentPools().keySet()).get(0)) .append("\n\t\tAgent pool count: ").append(new ArrayList<>(kubernetesCluster.agentPools().values()).get(0).count()) .append("\n\t\tAgent pool VM size: ").append(new ArrayList<>(kubernetesCluster.agentPools().values()).get(0).vmSize().toString()) .append("\n\tLinux user name: ").append(kubernetesCluster.linuxRootUsername()) .append("\n\tSSH key: ").append(kubernetesCluster.sshKey()) .append("\n\tService principal client ID: ").append(kubernetesCluster.servicePrincipalClientId()); System.out.println(info.toString()); } /** * Print an Azure Search Service. 
* * @param searchService an Azure Search Service */ public static void print(SearchService searchService) { StringBuilder info = new StringBuilder(); AdminKeys adminKeys = searchService.getAdminKeys(); PagedIterable<QueryKey> queryKeys = searchService.listQueryKeys(); info.append("Azure Search: ").append(searchService.id()) .append("\n\tResource group: ").append(searchService.resourceGroupName()) .append("\n\tRegion: ").append(searchService.region()) .append("\n\tTags: ").append(searchService.tags()) .append("\n\tSku: ").append(searchService.sku().name()) .append("\n\tStatus: ").append(searchService.status()) .append("\n\tProvisioning State: ").append(searchService.provisioningState()) .append("\n\tHosting Mode: ").append(searchService.hostingMode()) .append("\n\tReplicas: ").append(searchService.replicaCount()) .append("\n\tPartitions: ").append(searchService.partitionCount()) .append("\n\tPrimary Admin Key: ").append(adminKeys.primaryKey()) .append("\n\tSecondary Admin Key: ").append(adminKeys.secondaryKey()) .append("\n\tQuery keys:"); for (QueryKey queryKey : queryKeys) { info.append("\n\t\tKey name: ").append(queryKey.name()); info.append("\n\t\t Value: ").append(queryKey.key()); } System.out.println(info.toString()); } /** * Retrieve the secondary service principal client ID. * * @param envSecondaryServicePrincipal an Azure Container Registry * @return a service principal client ID * @throws IOException exception */
wonder if we could add version helper string to linkage errors here (from `JacksonVersion`) - it was useful for environments like spark with limited access to logs
private XmlMapperFactory() { MethodHandles.Lookup publicLookup = MethodHandles.publicLookup(); MethodHandle createXmlMapperBuilder; MethodHandle defaultUseWrapper; MethodHandle enableWriteXmlDeclaration; Object writeXmlDeclaration; MethodHandle enableEmptyElementAsNull; Object emptyElementAsNull; try { Class<?> xmlMapper = Class.forName(XML_MAPPER); Class<?> xmlMapperBuilder = Class.forName(XML_MAPPER_BUILDER); Class<?> fromXmlParser = Class.forName(FROM_XML_PARSER); Class<?> toXmlGenerator = Class.forName(TO_XML_GENERATOR); createXmlMapperBuilder = publicLookup.unreflect(xmlMapper.getDeclaredMethod("builder")); defaultUseWrapper = publicLookup.unreflect(xmlMapperBuilder.getDeclaredMethod("defaultUseWrapper", boolean.class)); enableWriteXmlDeclaration = publicLookup.unreflect(xmlMapperBuilder.getDeclaredMethod("enable", Array.newInstance(toXmlGenerator, 0).getClass())); writeXmlDeclaration = toXmlGenerator.getDeclaredField("WRITE_XML_DECLARATION").get(null); enableEmptyElementAsNull = publicLookup.unreflect(xmlMapperBuilder.getDeclaredMethod("enable", Array.newInstance(fromXmlParser, 0).getClass())); emptyElementAsNull = fromXmlParser.getDeclaredField("EMPTY_ELEMENT_AS_NULL").get(null); } catch (Throwable ex) { if (ex instanceof Error && !(ex instanceof LinkageError)) { throw (Error) ex; } throw LOGGER.logExceptionAsError(new IllegalStateException("Failed to retrieve MethodHandles used to " + "create XmlMapper. 
XML serialization won't be supported until " + "'com.fasterxml.jackson.dataformat:jackson-dataformat-xml' is added to the classpath.", ex)); } this.createXmlMapperBuilder = createXmlMapperBuilder; this.defaultUseWrapper = defaultUseWrapper; this.enableWriteXmlDeclaration = enableWriteXmlDeclaration; this.writeXmlDeclaration = writeXmlDeclaration; this.enableEmptyElementAsNull = enableEmptyElementAsNull; this.emptyElementAsNull = emptyElementAsNull; MethodHandle coercionConfigDefaults = null; MethodHandle setCoercion = null; Object coercionInputShapeEmptyString = null; Object coercionActionAsNull = null; boolean useReflectionToSetCoercion = false; try { Class<?> mutableCoercionConfig = Class.forName(MUTABLE_COERCION_CONFIG); Class<?> coercionInputShapeClass = Class.forName(COERCION_INPUT_SHAPE); Class<?> coercionActionClass = Class.forName(COERCION_ACTION); coercionConfigDefaults = publicLookup.findVirtual(ObjectMapper.class, "coercionConfigDefaults", MethodType.methodType(mutableCoercionConfig)); setCoercion = publicLookup.findVirtual(mutableCoercionConfig, "setCoercion", MethodType.methodType(mutableCoercionConfig, coercionInputShapeClass, coercionActionClass)); coercionInputShapeEmptyString = publicLookup.findStaticGetter(coercionInputShapeClass, "EmptyString", coercionInputShapeClass).invoke(); coercionActionAsNull = publicLookup.findStaticGetter(coercionActionClass, "AsNull", coercionActionClass) .invoke(); useReflectionToSetCoercion = true; } catch (Throwable ex) { if (ex instanceof Error && !(ex instanceof LinkageError)) { throw (Error) ex; } LOGGER.verbose("Failed to retrieve MethodHandles used to set coercion configurations. " + "Setting coercion configurations will be skipped. 
" + "Please update your Jackson dependencies to at least version 2.12", ex); } this.coercionConfigDefaults = coercionConfigDefaults; this.setCoercion = setCoercion; this.coercionInputShapeEmptyString = coercionInputShapeEmptyString; this.coercionActionAsNull = coercionActionAsNull; this.useReflectionToSetCoercion = useReflectionToSetCoercion; }
throw LOGGER.logExceptionAsError(new IllegalStateException("Failed to retrieve MethodHandles used to "
private XmlMapperFactory() { MethodHandles.Lookup publicLookup = MethodHandles.publicLookup(); MethodHandle createXmlMapperBuilder; MethodHandle defaultUseWrapper; MethodHandle enableWriteXmlDeclaration; Object writeXmlDeclaration; MethodHandle enableEmptyElementAsNull; Object emptyElementAsNull; try { Class<?> xmlMapper = Class.forName(XML_MAPPER); Class<?> xmlMapperBuilder = Class.forName(XML_MAPPER_BUILDER); Class<?> fromXmlParser = Class.forName(FROM_XML_PARSER); Class<?> toXmlGenerator = Class.forName(TO_XML_GENERATOR); createXmlMapperBuilder = publicLookup.unreflect(xmlMapper.getDeclaredMethod("builder")); defaultUseWrapper = publicLookup.unreflect(xmlMapperBuilder.getDeclaredMethod("defaultUseWrapper", boolean.class)); enableWriteXmlDeclaration = publicLookup.unreflect(xmlMapperBuilder.getDeclaredMethod("enable", Array.newInstance(toXmlGenerator, 0).getClass())); writeXmlDeclaration = toXmlGenerator.getDeclaredField("WRITE_XML_DECLARATION").get(null); enableEmptyElementAsNull = publicLookup.unreflect(xmlMapperBuilder.getDeclaredMethod("enable", Array.newInstance(fromXmlParser, 0).getClass())); emptyElementAsNull = fromXmlParser.getDeclaredField("EMPTY_ELEMENT_AS_NULL").get(null); } catch (Throwable ex) { if (ex instanceof Error && !(ex instanceof LinkageError)) { throw (Error) ex; } throw LOGGER.logExceptionAsError(new IllegalStateException("Failed to retrieve MethodHandles used to " + "create XmlMapper. XML serialization won't be supported until " + "'com.fasterxml.jackson.dataformat:jackson-dataformat-xml' is added to the classpath or updated to a " + "supported version. 
" + JacksonVersion.getHelpInfo(), ex)); } this.createXmlMapperBuilder = createXmlMapperBuilder; this.defaultUseWrapper = defaultUseWrapper; this.enableWriteXmlDeclaration = enableWriteXmlDeclaration; this.writeXmlDeclaration = writeXmlDeclaration; this.enableEmptyElementAsNull = enableEmptyElementAsNull; this.emptyElementAsNull = emptyElementAsNull; MethodHandle coercionConfigDefaults = null; MethodHandle setCoercion = null; Object coercionInputShapeEmptyString = null; Object coercionActionAsNull = null; boolean useReflectionToSetCoercion = false; try { Class<?> mutableCoercionConfig = Class.forName(MUTABLE_COERCION_CONFIG); Class<?> coercionInputShapeClass = Class.forName(COERCION_INPUT_SHAPE); Class<?> coercionActionClass = Class.forName(COERCION_ACTION); coercionConfigDefaults = publicLookup.findVirtual(ObjectMapper.class, "coercionConfigDefaults", MethodType.methodType(mutableCoercionConfig)); setCoercion = publicLookup.findVirtual(mutableCoercionConfig, "setCoercion", MethodType.methodType(mutableCoercionConfig, coercionInputShapeClass, coercionActionClass)); coercionInputShapeEmptyString = publicLookup.findStaticGetter(coercionInputShapeClass, "EmptyString", coercionInputShapeClass).invoke(); coercionActionAsNull = publicLookup.findStaticGetter(coercionActionClass, "AsNull", coercionActionClass) .invoke(); useReflectionToSetCoercion = true; } catch (Throwable ex) { if (ex instanceof Error && !(ex instanceof LinkageError)) { throw (Error) ex; } LOGGER.verbose("Failed to retrieve MethodHandles used to set coercion configurations. " + "Setting coercion configurations will be skipped. " + "Please update your Jackson dependencies to at least version 2.12", ex); } this.coercionConfigDefaults = coercionConfigDefaults; this.setCoercion = setCoercion; this.coercionInputShapeEmptyString = coercionInputShapeEmptyString; this.coercionActionAsNull = coercionActionAsNull; this.useReflectionToSetCoercion = useReflectionToSetCoercion; }
class XmlMapperFactory { private static final ClientLogger LOGGER = new ClientLogger(XmlMapperFactory.class); private static final String XML_MAPPER = "com.fasterxml.jackson.dataformat.xml.XmlMapper"; private static final String XML_MAPPER_BUILDER = "com.fasterxml.jackson.dataformat.xml.XmlMapper$Builder"; private static final String FROM_XML_PARSER = "com.fasterxml.jackson.dataformat.xml.deser.FromXmlParser$Feature"; private static final String TO_XML_GENERATOR = "com.fasterxml.jackson.dataformat.xml.ser.ToXmlGenerator$Feature"; private final MethodHandle createXmlMapperBuilder; private final MethodHandle defaultUseWrapper; private final MethodHandle enableWriteXmlDeclaration; private final Object writeXmlDeclaration; private final MethodHandle enableEmptyElementAsNull; private final Object emptyElementAsNull; private static final String MUTABLE_COERCION_CONFIG = "com.fasterxml.jackson.databind.cfg.MutableCoercionConfig"; private static final String COERCION_INPUT_SHAPE = "com.fasterxml.jackson.databind.cfg.CoercionInputShape"; private static final String COERCION_ACTION = "com.fasterxml.jackson.databind.cfg.CoercionAction"; private final MethodHandle coercionConfigDefaults; private final MethodHandle setCoercion; private final Object coercionInputShapeEmptyString; private final Object coercionActionAsNull; private final boolean useReflectionToSetCoercion; public static final XmlMapperFactory INSTANCE = new XmlMapperFactory(); public ObjectMapper createXmlMapper() { ObjectMapper xmlMapper; try { MapperBuilder<?, ?> xmlMapperBuilder = ObjectMapperFactory .initializeMapperBuilder((MapperBuilder<?, ?>) createXmlMapperBuilder.invoke()); defaultUseWrapper.invokeWithArguments(xmlMapperBuilder, false); enableWriteXmlDeclaration.invokeWithArguments(xmlMapperBuilder, writeXmlDeclaration); /* * In Jackson 2.12 the default value of this feature changed from true to false. 
* https: */ enableEmptyElementAsNull.invokeWithArguments(xmlMapperBuilder, emptyElementAsNull); xmlMapper = xmlMapperBuilder.build(); } catch (Throwable e) { if (e instanceof Error) { throw (Error) e; } throw LOGGER.logExceptionAsError(new IllegalStateException("Unable to create XmlMapper instance.", e)); } if (useReflectionToSetCoercion) { try { Object object = coercionConfigDefaults.invoke(xmlMapper); setCoercion.invoke(object, coercionInputShapeEmptyString, coercionActionAsNull); } catch (Throwable e) { if (e instanceof Error) { throw (Error) e; } LOGGER.verbose("Failed to set coercion actions.", e); } } else { LOGGER.verbose("Didn't set coercion defaults as it wasn't found on the classpath."); } return xmlMapper; } }
class XmlMapperFactory { private static final ClientLogger LOGGER = new ClientLogger(XmlMapperFactory.class); private static final String XML_MAPPER = "com.fasterxml.jackson.dataformat.xml.XmlMapper"; private static final String XML_MAPPER_BUILDER = "com.fasterxml.jackson.dataformat.xml.XmlMapper$Builder"; private static final String FROM_XML_PARSER = "com.fasterxml.jackson.dataformat.xml.deser.FromXmlParser$Feature"; private static final String TO_XML_GENERATOR = "com.fasterxml.jackson.dataformat.xml.ser.ToXmlGenerator$Feature"; private final MethodHandle createXmlMapperBuilder; private final MethodHandle defaultUseWrapper; private final MethodHandle enableWriteXmlDeclaration; private final Object writeXmlDeclaration; private final MethodHandle enableEmptyElementAsNull; private final Object emptyElementAsNull; private static final String MUTABLE_COERCION_CONFIG = "com.fasterxml.jackson.databind.cfg.MutableCoercionConfig"; private static final String COERCION_INPUT_SHAPE = "com.fasterxml.jackson.databind.cfg.CoercionInputShape"; private static final String COERCION_ACTION = "com.fasterxml.jackson.databind.cfg.CoercionAction"; private final MethodHandle coercionConfigDefaults; private final MethodHandle setCoercion; private final Object coercionInputShapeEmptyString; private final Object coercionActionAsNull; private final boolean useReflectionToSetCoercion; public static final XmlMapperFactory INSTANCE = new XmlMapperFactory(); public ObjectMapper createXmlMapper() { ObjectMapper xmlMapper; try { MapperBuilder<?, ?> xmlMapperBuilder = ObjectMapperFactory .initializeMapperBuilder((MapperBuilder<?, ?>) createXmlMapperBuilder.invoke()); defaultUseWrapper.invokeWithArguments(xmlMapperBuilder, false); enableWriteXmlDeclaration.invokeWithArguments(xmlMapperBuilder, writeXmlDeclaration); /* * In Jackson 2.12 the default value of this feature changed from true to false. 
* https: */ enableEmptyElementAsNull.invokeWithArguments(xmlMapperBuilder, emptyElementAsNull); xmlMapper = xmlMapperBuilder.build(); } catch (Throwable e) { if (e instanceof Error) { throw (Error) e; } throw LOGGER.logExceptionAsError(new IllegalStateException("Unable to create XmlMapper instance.", e)); } if (useReflectionToSetCoercion) { try { Object object = coercionConfigDefaults.invoke(xmlMapper); setCoercion.invoke(object, coercionInputShapeEmptyString, coercionActionAsNull); } catch (Throwable e) { if (e instanceof Error) { throw (Error) e; } LOGGER.verbose("Failed to set coercion actions.", e); } } else { LOGGER.verbose("Didn't set coercion defaults as it wasn't found on the classpath."); } return xmlMapper; } }
this can be changed to `.map(this::translateDtmfToneToInternal)`
Mono<Response<Void>> recognizeWithResponseInternal(CallMediaRecognizeOptions recognizeOptions, Context context) { try { context = context == null ? Context.NONE : context; if (recognizeOptions instanceof CallMediaRecognizeDtmfOptions) { CallMediaRecognizeDtmfOptions dtmfRecognizeOptions = (CallMediaRecognizeDtmfOptions) recognizeOptions; DtmfOptionsInternal dtmfOptionsInternal = new DtmfOptionsInternal(); dtmfOptionsInternal.setInterToneTimeoutInSeconds((int) dtmfRecognizeOptions.getInterToneTimeout().getSeconds()); if (dtmfRecognizeOptions.getMaxTonesToCollect() != null) { dtmfOptionsInternal.setMaxTonesToCollect(dtmfRecognizeOptions.getMaxTonesToCollect()); } if (dtmfRecognizeOptions.getStopTones() != null) { List<DtmfTone> dtmfTones = dtmfRecognizeOptions.getStopTones().stream() .map(e -> translateDtmfToneToInternal(e)) .collect(Collectors.toList()); dtmfOptionsInternal.setStopTones(dtmfTones); } RecognizeOptionsInternal recognizeOptionsInternal = new RecognizeOptionsInternal() .setDtmfOptions(dtmfOptionsInternal) .setInterruptPrompt(recognizeOptions.isInterruptPrompt()) .setTargetParticipant(CommunicationIdentifierConverter.convert(recognizeOptions.getTargetParticipant())); recognizeOptionsInternal.setInitialSilenceTimeoutInSeconds((int) recognizeOptions.getInitialSilenceTimeout().getSeconds()); PlaySourceInternal playSourceInternal = null; if (recognizeOptions.getPlayPrompt() != null) { PlaySource playSource = recognizeOptions.getPlayPrompt(); playSourceInternal = translatePlaySourceToPlaySourceInternal(playSource); } RecognizeRequest recognizeRequest = new RecognizeRequest() .setRecognizeInputType(RecognizeInputTypeInternal.fromString(recognizeOptions.getRecognizeInputType().toString())) .setInterruptCallMediaOperation(recognizeOptions.isInterruptCallMediaOperation()) .setPlayPrompt(playSourceInternal) .setRecognizeOptions(recognizeOptionsInternal) .setOperationContext(recognizeOptions.getOperationContext()); return 
contentsInternal.recognizeWithResponseAsync(callConnectionId, recognizeRequest, context); } else if (recognizeOptions instanceof CallMediaRecognizeChoiceOptions) { CallMediaRecognizeChoiceOptions choiceRecognizeOptions = (CallMediaRecognizeChoiceOptions) recognizeOptions; RecognizeOptionsInternal recognizeOptionsInternal = new RecognizeOptionsInternal() .setChoices(translateRecognizeChoiceListtoInternal(choiceRecognizeOptions.getRecognizeChoices())) .setInterruptPrompt(choiceRecognizeOptions.isInterruptPrompt()) .setTargetParticipant(CommunicationIdentifierConverter.convert(choiceRecognizeOptions.getTargetParticipant())); recognizeOptionsInternal.setInitialSilenceTimeoutInSeconds((int) choiceRecognizeOptions.getInitialSilenceTimeout().getSeconds()); PlaySourceInternal playSourceInternal = null; if (recognizeOptions.getPlayPrompt() != null) { PlaySource playSource = recognizeOptions.getPlayPrompt(); playSourceInternal = translatePlaySourceToPlaySourceInternal(playSource); } RecognizeRequest recognizeRequest = new RecognizeRequest() .setRecognizeInputType(RecognizeInputTypeInternal.fromString(choiceRecognizeOptions.getRecognizeInputType().toString())) .setInterruptCallMediaOperation(choiceRecognizeOptions.isInterruptCallMediaOperation()) .setPlayPrompt(playSourceInternal) .setRecognizeOptions(recognizeOptionsInternal) .setOperationContext(recognizeOptions.getOperationContext()); return contentsInternal.recognizeWithResponseAsync(callConnectionId, recognizeRequest, context); } else { return monoError(logger, new UnsupportedOperationException(recognizeOptions.getClass().getName())); } } catch (RuntimeException e) { return monoError(logger, e); } }
.map(e -> translateDtmfToneToInternal(e))
new DtmfOptionsInternal(); dtmfOptionsInternal.setInterToneTimeoutInSeconds((int) dtmfRecognizeOptions.getInterToneTimeout().getSeconds()); if (dtmfRecognizeOptions.getMaxTonesToCollect() != null) { dtmfOptionsInternal.setMaxTonesToCollect(dtmfRecognizeOptions.getMaxTonesToCollect()); }
class CallMediaAsync { private final CallMediasImpl contentsInternal; private final String callConnectionId; private final ClientLogger logger; CallMediaAsync(String callConnectionId, CallMediasImpl contentsInternal) { this.callConnectionId = callConnectionId; this.contentsInternal = contentsInternal; this.logger = new ClientLogger(CallMediaAsync.class); } /** * Play * * @param playSource A {@link PlaySource} representing the source to play. * @param playTo the targets to play to * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Void for successful play request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> play(PlaySource playSource, List<CommunicationIdentifier> playTo) { return playWithResponse(playSource, playTo, null).flatMap(FluxUtil::toMono); } /** * Play to all participants * * @param playSource A {@link PlaySource} representing the source to play. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Void for successful playAll request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> playToAll(PlaySource playSource) { return playToAllWithResponse(playSource, null).flatMap(FluxUtil::toMono); } /** * Play * * @param playSource A {@link PlaySource} representing the source to play. * @param playTo the targets to play to * @param options play options. * @return Response for successful play request. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> playWithResponse(PlaySource playSource, List<CommunicationIdentifier> playTo, PlayOptions options) { return playWithResponseInternal(playSource, playTo, options, null); } /** * Play to all participants * * @param playSource A {@link PlaySource} representing the source to play. * @param options play options. * @return Response for successful playAll request. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> playToAllWithResponse(PlaySource playSource, PlayOptions options) { return playWithResponseInternal(playSource, Collections.emptyList(), options, null); } /** * Recognize operation. * @param recognizeOptions Different attributes for recognize. * @return Response for successful recognize request. */ public Mono<Void> startRecognizing(CallMediaRecognizeOptions recognizeOptions) { return startRecognizingWithResponse(recognizeOptions).then(); } /** * Recognize operation * @param recognizeOptions Different attributes for recognize. * @return Response for successful recognize request. */ public Mono<Response<Void>> startRecognizingWithResponse(CallMediaRecognizeOptions recognizeOptions) { return withContext(context -> recognizeWithResponseInternal(recognizeOptions, context)); } Mono<Response<Void>> recognizeWithResponseInternal(CallMediaRecognizeOptions recognizeOptions, Context context) { try { context = context == null ? 
Context.NONE : context; if (recognizeOptions instanceof CallMediaRecognizeDtmfOptions) { CallMediaRecognizeDtmfOptions dtmfRecognizeOptions = (CallMediaRecognizeDtmfOptions) recognizeOptions; DtmfOptionsInternal dtmfOptionsInternal = if (dtmfRecognizeOptions.getStopTones() != null) { List<DtmfTone> dtmfTones = dtmfRecognizeOptions.getStopTones().stream() .map(e -> translateDtmfToneToInternal(e)) .collect(Collectors.toList()); dtmfOptionsInternal.setStopTones(dtmfTones); } RecognizeOptionsInternal recognizeOptionsInternal = new RecognizeOptionsInternal() .setDtmfOptions(dtmfOptionsInternal) .setInterruptPrompt(recognizeOptions.isInterruptPrompt()) .setTargetParticipant(CommunicationIdentifierConverter.convert(recognizeOptions.getTargetParticipant())); recognizeOptionsInternal.setInitialSilenceTimeoutInSeconds((int) recognizeOptions.getInitialSilenceTimeout().getSeconds()); PlaySourceInternal playSourceInternal = null; if (recognizeOptions.getPlayPrompt() != null) { PlaySource playSource = recognizeOptions.getPlayPrompt(); playSourceInternal = translatePlaySourceToPlaySourceInternal(playSource); } RecognizeRequest recognizeRequest = new RecognizeRequest() .setRecognizeInputType(RecognizeInputTypeInternal.fromString(recognizeOptions.getRecognizeInputType().toString())) .setInterruptCallMediaOperation(recognizeOptions.isInterruptCallMediaOperation()) .setPlayPrompt(playSourceInternal) .setRecognizeOptions(recognizeOptionsInternal) .setOperationContext(recognizeOptions.getOperationContext()); return contentsInternal.recognizeWithResponseAsync(callConnectionId, recognizeRequest, context); } else if (recognizeOptions instanceof CallMediaRecognizeChoiceOptions) { CallMediaRecognizeChoiceOptions choiceRecognizeOptions = (CallMediaRecognizeChoiceOptions) recognizeOptions; RecognizeOptionsInternal recognizeOptionsInternal = new RecognizeOptionsInternal() .setChoices(translateRecognizeChoiceListtoInternal(choiceRecognizeOptions.getRecognizeChoices())) 
.setInterruptPrompt(choiceRecognizeOptions.isInterruptPrompt()) .setTargetParticipant(CommunicationIdentifierConverter.convert(choiceRecognizeOptions.getTargetParticipant())); recognizeOptionsInternal.setInitialSilenceTimeoutInSeconds((int) choiceRecognizeOptions.getInitialSilenceTimeout().getSeconds()); PlaySourceInternal playSourceInternal = null; if (recognizeOptions.getPlayPrompt() != null) { PlaySource playSource = recognizeOptions.getPlayPrompt(); playSourceInternal = translatePlaySourceToPlaySourceInternal(playSource); } RecognizeRequest recognizeRequest = new RecognizeRequest() .setRecognizeInputType(RecognizeInputTypeInternal.fromString(choiceRecognizeOptions.getRecognizeInputType().toString())) .setInterruptCallMediaOperation(choiceRecognizeOptions.isInterruptCallMediaOperation()) .setPlayPrompt(playSourceInternal) .setRecognizeOptions(recognizeOptionsInternal) .setOperationContext(recognizeOptions.getOperationContext()); return contentsInternal.recognizeWithResponseAsync(callConnectionId, recognizeRequest, context); } else { return monoError(logger, new UnsupportedOperationException(recognizeOptions.getClass().getName())); } } catch (RuntimeException e) { return monoError(logger, e); } } /** * Cancels all the queued media operations. * @return Void */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> cancelAllMediaOperations() { return cancelAllMediaOperationsWithResponse().then(); } /** * Cancels all the queued media operations * @return Response for successful playAll request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> cancelAllMediaOperationsWithResponse() { return cancelAllMediaOperationsWithResponseInternal(null); } Mono<Response<Void>> cancelAllMediaOperationsWithResponseInternal(Context context) { try { return withContext(contextValue -> { contextValue = context == null ? 
contextValue : context; return contentsInternal.cancelAllMediaOperationsWithResponseAsync(callConnectionId, contextValue) .onErrorMap(HttpResponseException.class, ErrorConstructorProxy::create); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> playWithResponseInternal(PlaySource playSource, List<CommunicationIdentifier> playTo, PlayOptions options, Context context) { try { return withContext(contextValue -> { contextValue = context == null ? contextValue : context; PlayRequest request = getPlayRequest(playSource, playTo, options); return contentsInternal.playWithResponseAsync(callConnectionId, request, contextValue) .onErrorMap(HttpResponseException.class, ErrorConstructorProxy::create); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } PlayRequest getPlayRequest(PlaySource playSource, List<CommunicationIdentifier> playTo, PlayOptions options) { PlaySourceInternal playSourceInternal = new PlaySourceInternal(); if (playSource instanceof FileSource) { playSourceInternal = getPlaySourceInternalFromFileSource((FileSource) playSource); } else if (playSource instanceof TextSource) { playSourceInternal = getPlaySourceInternalFromTextSource((TextSource) playSource); } if (playSourceInternal.getSourceType() != null) { PlayRequest request = new PlayRequest() .setPlaySourceInfo(playSourceInternal) .setPlayTo( playTo .stream() .map(CommunicationIdentifierConverter::convert) .collect(Collectors.toList())); if (options != null) { request.setPlayOptions(new PlayOptionsInternal().setLoop(options.isLoop())); request.setOperationContext(options.getOperationContext()); } return request; } throw logger.logExceptionAsError(new IllegalArgumentException(playSource.getClass().getCanonicalName())); } private PlaySourceInternal getPlaySourceInternalFromFileSource(FileSource playSource) { FileSourceInternal fileSourceInternal = new FileSourceInternal().setUri(playSource.getUri()); PlaySourceInternal playSourceInternal = new 
PlaySourceInternal() .setSourceType(PlaySourceTypeInternal.FILE) .setFileSource(fileSourceInternal) .setPlaySourceId(playSource.getPlaySourceId()); return playSourceInternal; } private PlaySourceInternal getPlaySourceInternalFromTextSource(TextSource playSource) { TextSourceInternal textSourceInternal = new TextSourceInternal().setText(playSource.getText()); if (playSource.getVoiceGender() != null) { textSourceInternal.setVoiceGender(GenderType.fromString(playSource.getVoiceGender().toString())); } if (playSource.getSourceLocale() != null) { textSourceInternal.setSourceLocale(playSource.getSourceLocale()); } if (playSource.getVoiceName() != null) { textSourceInternal.setVoiceName(playSource.getVoiceName()); } PlaySourceInternal playSourceInternal = new PlaySourceInternal() .setSourceType(PlaySourceTypeInternal.TEXT) .setTextSource(textSourceInternal) .setPlaySourceId(playSource.getPlaySourceId()); return playSourceInternal; } private PlaySourceInternal translatePlaySourceToPlaySourceInternal(PlaySource playSource) { PlaySourceInternal playSourceInternal = new PlaySourceInternal(); if (playSource instanceof FileSource) { playSourceInternal = getPlaySourceInternalFromFileSource((FileSource) playSource); } else if (playSource instanceof TextSource) { playSourceInternal = getPlaySourceInternalFromTextSource((TextSource) playSource); } return playSourceInternal; } private List<RecognizeChoice> translateRecognizeChoiceListtoInternal(List<com.azure.communication.callautomation.models.RecognizeChoice> recognizeChoices) { return recognizeChoices.stream() .map(e -> translateRecognizeChoiceToInternal(e)) .collect(Collectors.toList()); } private RecognizeChoice translateRecognizeChoiceToInternal(com.azure.communication.callautomation.models.RecognizeChoice recognizeChoice) { RecognizeChoice internalRecognizeChoice = new RecognizeChoice(); if (recognizeChoice.getLabel() != null) { internalRecognizeChoice.setLabel(recognizeChoice.getLabel()); } if (recognizeChoice.getPhrases() 
!= null) { internalRecognizeChoice.setPhrases(recognizeChoice.getPhrases()); } if (recognizeChoice.getTone() != null) { internalRecognizeChoice.setTone(translateDtmfToneToInternal(recognizeChoice.getTone())); } return internalRecognizeChoice; } private DtmfTone translateDtmfToneToInternal(com.azure.communication.callautomation.models.DtmfTone dtmfTone) { return DtmfTone.fromString(dtmfTone.toString()); } }
class CallMediaAsync { private final CallMediasImpl contentsInternal; private final String callConnectionId; private final ClientLogger logger; CallMediaAsync(String callConnectionId, CallMediasImpl contentsInternal) { this.callConnectionId = callConnectionId; this.contentsInternal = contentsInternal; this.logger = new ClientLogger(CallMediaAsync.class); } /** * Play * * @param playSource A {@link PlaySource} representing the source to play. * @param playTo the targets to play to * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Void for successful play request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> play(PlaySource playSource, List<CommunicationIdentifier> playTo) { return playWithResponse(playSource, playTo, null).flatMap(FluxUtil::toMono); } /** * Play to all participants * * @param playSource A {@link PlaySource} representing the source to play. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Void for successful playAll request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> playToAll(PlaySource playSource) { return playToAllWithResponse(playSource, null).flatMap(FluxUtil::toMono); } /** * Play * * @param playSource A {@link PlaySource} representing the source to play. * @param playTo the targets to play to * @param options play options. * @return Response for successful play request. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> playWithResponse(PlaySource playSource, List<CommunicationIdentifier> playTo, PlayOptions options) { return playWithResponseInternal(playSource, playTo, options, null); } /** * Play to all participants * * @param playSource A {@link PlaySource} representing the source to play. * @param options play options. * @return Response for successful playAll request. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> playToAllWithResponse(PlaySource playSource, PlayOptions options) { return playWithResponseInternal(playSource, Collections.emptyList(), options, null); } /** * Recognize operation. * @param recognizeOptions Different attributes for recognize. * @return Response for successful recognize request. */ public Mono<Void> startRecognizing(CallMediaRecognizeOptions recognizeOptions) { return startRecognizingWithResponse(recognizeOptions).then(); } /** * Recognize operation * @param recognizeOptions Different attributes for recognize. * @return Response for successful recognize request. */ public Mono<Response<Void>> startRecognizingWithResponse(CallMediaRecognizeOptions recognizeOptions) { return withContext(context -> recognizeWithResponseInternal(recognizeOptions, context)); } Mono<Response<Void>> recognizeWithResponseInternal(CallMediaRecognizeOptions recognizeOptions, Context context) { try { context = context == null ? 
Context.NONE : context; if (recognizeOptions instanceof CallMediaRecognizeDtmfOptions) { CallMediaRecognizeDtmfOptions dtmfRecognizeOptions = (CallMediaRecognizeDtmfOptions) recognizeOptions; DtmfOptionsInternal dtmfOptionsInternal = if (dtmfRecognizeOptions.getStopTones() != null) { List<DtmfToneInternal> dtmfTones = dtmfRecognizeOptions.getStopTones().stream() .map(this::translateDtmfToneInternal) .collect(Collectors.toList()); dtmfOptionsInternal.setStopTones(dtmfTones); } RecognizeOptionsInternal recognizeOptionsInternal = new RecognizeOptionsInternal() .setDtmfOptions(dtmfOptionsInternal) .setInterruptPrompt(recognizeOptions.isInterruptPrompt()) .setTargetParticipant(CommunicationIdentifierConverter.convert(recognizeOptions.getTargetParticipant())); recognizeOptionsInternal.setInitialSilenceTimeoutInSeconds((int) recognizeOptions.getInitialSilenceTimeout().getSeconds()); PlaySourceInternal playSourceInternal = null; if (recognizeOptions.getPlayPrompt() != null) { PlaySource playSource = recognizeOptions.getPlayPrompt(); playSourceInternal = translatePlaySourceToPlaySourceInternal(playSource); } RecognizeRequest recognizeRequest = new RecognizeRequest() .setRecognizeInputType(RecognizeInputTypeInternal.fromString(recognizeOptions.getRecognizeInputType().toString())) .setInterruptCallMediaOperation(recognizeOptions.isInterruptCallMediaOperation()) .setPlayPrompt(playSourceInternal) .setRecognizeOptions(recognizeOptionsInternal) .setOperationContext(recognizeOptions.getOperationContext()); return contentsInternal.recognizeWithResponseAsync(callConnectionId, recognizeRequest, context); } else if (recognizeOptions instanceof CallMediaRecognizeChoiceOptions) { CallMediaRecognizeChoiceOptions choiceRecognizeOptions = (CallMediaRecognizeChoiceOptions) recognizeOptions; RecognizeOptionsInternal recognizeOptionsInternal = new RecognizeOptionsInternal() .setChoices(translateListRecognizeChoiceInternal(choiceRecognizeOptions.getRecognizeChoices())) 
.setInterruptPrompt(choiceRecognizeOptions.isInterruptPrompt()) .setTargetParticipant(CommunicationIdentifierConverter.convert(choiceRecognizeOptions.getTargetParticipant())); recognizeOptionsInternal.setInitialSilenceTimeoutInSeconds((int) choiceRecognizeOptions.getInitialSilenceTimeout().getSeconds()); PlaySourceInternal playSourceInternal = null; if (recognizeOptions.getPlayPrompt() != null) { PlaySource playSource = recognizeOptions.getPlayPrompt(); playSourceInternal = translatePlaySourceToPlaySourceInternal(playSource); } RecognizeRequest recognizeRequest = new RecognizeRequest() .setRecognizeInputType(RecognizeInputTypeInternal.fromString(choiceRecognizeOptions.getRecognizeInputType().toString())) .setInterruptCallMediaOperation(choiceRecognizeOptions.isInterruptCallMediaOperation()) .setPlayPrompt(playSourceInternal) .setRecognizeOptions(recognizeOptionsInternal) .setOperationContext(recognizeOptions.getOperationContext()); return contentsInternal.recognizeWithResponseAsync(callConnectionId, recognizeRequest, context); } else { return monoError(logger, new UnsupportedOperationException(recognizeOptions.getClass().getName())); } } catch (RuntimeException e) { return monoError(logger, e); } } /** * Cancels all the queued media operations. * @return Void */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> cancelAllMediaOperations() { return cancelAllMediaOperationsWithResponse().then(); } /** * Cancels all the queued media operations * @return Response for successful playAll request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> cancelAllMediaOperationsWithResponse() { return cancelAllMediaOperationsWithResponseInternal(null); } Mono<Response<Void>> cancelAllMediaOperationsWithResponseInternal(Context context) { try { return withContext(contextValue -> { contextValue = context == null ? 
contextValue : context; return contentsInternal.cancelAllMediaOperationsWithResponseAsync(callConnectionId, contextValue) .onErrorMap(HttpResponseException.class, ErrorConstructorProxy::create); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> playWithResponseInternal(PlaySource playSource, List<CommunicationIdentifier> playTo, PlayOptions options, Context context) { try { return withContext(contextValue -> { contextValue = context == null ? contextValue : context; PlayRequest request = getPlayRequest(playSource, playTo, options); return contentsInternal.playWithResponseAsync(callConnectionId, request, contextValue) .onErrorMap(HttpResponseException.class, ErrorConstructorProxy::create); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } PlayRequest getPlayRequest(PlaySource playSource, List<CommunicationIdentifier> playTo, PlayOptions options) { PlaySourceInternal playSourceInternal = new PlaySourceInternal(); if (playSource instanceof FileSource) { playSourceInternal = getPlaySourceInternalFromFileSource((FileSource) playSource); } else if (playSource instanceof TextSource) { playSourceInternal = getPlaySourceInternalFromTextSource((TextSource) playSource); } if (playSourceInternal.getSourceType() != null) { PlayRequest request = new PlayRequest() .setPlaySourceInfo(playSourceInternal) .setPlayTo( playTo .stream() .map(CommunicationIdentifierConverter::convert) .collect(Collectors.toList())); if (options != null) { request.setPlayOptions(new PlayOptionsInternal().setLoop(options.isLoop())); request.setOperationContext(options.getOperationContext()); } return request; } throw logger.logExceptionAsError(new IllegalArgumentException(playSource.getClass().getCanonicalName())); } private PlaySourceInternal getPlaySourceInternalFromFileSource(FileSource playSource) { FileSourceInternal fileSourceInternal = new FileSourceInternal().setUri(playSource.getUri()); PlaySourceInternal playSourceInternal = new 
PlaySourceInternal() .setSourceType(PlaySourceTypeInternal.FILE) .setFileSource(fileSourceInternal) .setPlaySourceId(playSource.getPlaySourceId()); return playSourceInternal; } private PlaySourceInternal getPlaySourceInternalFromTextSource(TextSource playSource) { TextSourceInternal textSourceInternal = new TextSourceInternal().setText(playSource.getText()); if (playSource.getVoiceGender() != null) { textSourceInternal.setVoiceGender(GenderTypeInternal.fromString(playSource.getVoiceGender().toString())); } if (playSource.getSourceLocale() != null) { textSourceInternal.setSourceLocale(playSource.getSourceLocale()); } if (playSource.getVoiceName() != null) { textSourceInternal.setVoiceName(playSource.getVoiceName()); } PlaySourceInternal playSourceInternal = new PlaySourceInternal() .setSourceType(PlaySourceTypeInternal.TEXT) .setTextSource(textSourceInternal) .setPlaySourceId(playSource.getPlaySourceId()); return playSourceInternal; } private PlaySourceInternal translatePlaySourceToPlaySourceInternal(PlaySource playSource) { PlaySourceInternal playSourceInternal = new PlaySourceInternal(); if (playSource instanceof FileSource) { playSourceInternal = getPlaySourceInternalFromFileSource((FileSource) playSource); } else if (playSource instanceof TextSource) { playSourceInternal = getPlaySourceInternalFromTextSource((TextSource) playSource); } return playSourceInternal; } private List<RecognizeChoiceInternal> translateListRecognizeChoiceInternal(List<RecognizeChoice> recognizeChoices) { return recognizeChoices.stream() .map(this::translateRecognizeChoiceInternal) .collect(Collectors.toList()); } private RecognizeChoiceInternal translateRecognizeChoiceInternal(RecognizeChoice recognizeChoice) { RecognizeChoiceInternal internalRecognizeChoice = new RecognizeChoiceInternal(); if (recognizeChoice.getLabel() != null) { internalRecognizeChoice.setLabel(recognizeChoice.getLabel()); } if (recognizeChoice.getPhrases() != null) { 
internalRecognizeChoice.setPhrases(recognizeChoice.getPhrases()); } if (recognizeChoice.getTone() != null) { internalRecognizeChoice.setTone(translateDtmfToneInternal(recognizeChoice.getTone())); } return internalRecognizeChoice; } private DtmfToneInternal translateDtmfToneInternal(DtmfTone dtmfTone) { return DtmfToneInternal.fromString(dtmfTone.toString()); } }
for the totalAcquireChannels -> are we trying to track how many times we opened a connection? then the right place to track should be in `notifyChannelConnect`. `doAcquireChannel ` will be called for every request regardless whether this is a new established channel or reuse an existing one
private void doAcquireChannel(final ChannelPromiseWithExpiryTime promise, final Channel candidate) { this.ensureInEventLoop(); acquiredChannels.put(candidate, candidate); totalAcquiredChannels.incrementAndGet(); final ChannelPromiseWithExpiryTime anotherPromise = this.newChannelPromiseForAvailableChannel(promise, candidate); final EventLoop loop = candidate.eventLoop(); if (loop.inEventLoop()) { this.doChannelHealthCheck(candidate, anotherPromise); } else { loop.execute(() -> this.doChannelHealthCheck(candidate, anotherPromise)); } }
totalAcquiredChannels.incrementAndGet();
private void doAcquireChannel(final ChannelPromiseWithExpiryTime promise, final Channel candidate) { this.ensureInEventLoop(); acquiredChannels.put(candidate, candidate); final ChannelPromiseWithExpiryTime anotherPromise = this.newChannelPromiseForAvailableChannel(promise, candidate); final EventLoop loop = candidate.eventLoop(); if (loop.inEventLoop()) { this.doChannelHealthCheck(candidate, anotherPromise); } else { loop.execute(() -> this.doChannelHealthCheck(candidate, anotherPromise)); } }
class and should be pulled up to RntbdServiceEndpoint or this.acquisitionTimeoutInNanos = config.connectionAcquisitionTimeoutInNanos(); this.allocatorMetric = config.allocator().metric(); this.maxChannels = config.maxChannelsPerEndpoint(); this.maxRequestsPerChannel = config.maxRequestsPerChannel(); this.maxPendingAcquisitions = Integer.MAX_VALUE; this.releaseHealthCheck = true; this.acquisitionTimeoutTask = acquisitionTimeoutInNanos <= 0 ? null : new AcquireTimeoutTask(this) { /** * Fails a request due to a channel acquisition timeout. * * @param task a {@link AcquireListener channel acquisition task} that has timed out. */ @Override public void onTimeout(AcquireListener task) { task.originalPromise.setFailure(ACQUISITION_TIMEOUT); RntbdChannelAcquisitionTimeline.startNewEvent( task.originalPromise.getChannelAcquisitionTimeline(), RntbdChannelAcquisitionEventType.PENDING_TIME_OUT, clientTelemetry); } }
class and should be pulled up to RntbdServiceEndpoint or this.acquisitionTimeoutInNanos = config.connectionAcquisitionTimeoutInNanos(); this.allocatorMetric = config.allocator().metric(); this.maxChannels = config.maxChannelsPerEndpoint(); this.maxRequestsPerChannel = config.maxRequestsPerChannel(); this.maxPendingAcquisitions = Integer.MAX_VALUE; this.releaseHealthCheck = true; this.acquisitionTimeoutTask = acquisitionTimeoutInNanos <= 0 ? null : new AcquireTimeoutTask(this) { /** * Fails a request due to a channel acquisition timeout. * * @param task a {@link AcquireListener channel acquisition task} that has timed out. */ @Override public void onTimeout(AcquireListener task) { task.originalPromise.setFailure(ACQUISITION_TIMEOUT); RntbdChannelAcquisitionTimeline.startNewEvent( task.originalPromise.getChannelAcquisitionTimeline(), RntbdChannelAcquisitionEventType.PENDING_TIME_OUT, clientTelemetry); } }
will change the style
Mono<Response<Void>> recognizeWithResponseInternal(CallMediaRecognizeOptions recognizeOptions, Context context) { try { context = context == null ? Context.NONE : context; if (recognizeOptions instanceof CallMediaRecognizeDtmfOptions) { CallMediaRecognizeDtmfOptions dtmfRecognizeOptions = (CallMediaRecognizeDtmfOptions) recognizeOptions; DtmfOptionsInternal dtmfOptionsInternal = new DtmfOptionsInternal(); dtmfOptionsInternal.setInterToneTimeoutInSeconds((int) dtmfRecognizeOptions.getInterToneTimeout().getSeconds()); if (dtmfRecognizeOptions.getMaxTonesToCollect() != null) { dtmfOptionsInternal.setMaxTonesToCollect(dtmfRecognizeOptions.getMaxTonesToCollect()); } if (dtmfRecognizeOptions.getStopTones() != null) { List<DtmfTone> dtmfTones = dtmfRecognizeOptions.getStopTones().stream() .map(e -> translateDtmfToneToInternal(e)) .collect(Collectors.toList()); dtmfOptionsInternal.setStopTones(dtmfTones); } RecognizeOptionsInternal recognizeOptionsInternal = new RecognizeOptionsInternal() .setDtmfOptions(dtmfOptionsInternal) .setInterruptPrompt(recognizeOptions.isInterruptPrompt()) .setTargetParticipant(CommunicationIdentifierConverter.convert(recognizeOptions.getTargetParticipant())); recognizeOptionsInternal.setInitialSilenceTimeoutInSeconds((int) recognizeOptions.getInitialSilenceTimeout().getSeconds()); PlaySourceInternal playSourceInternal = null; if (recognizeOptions.getPlayPrompt() != null) { PlaySource playSource = recognizeOptions.getPlayPrompt(); playSourceInternal = translatePlaySourceToPlaySourceInternal(playSource); } RecognizeRequest recognizeRequest = new RecognizeRequest() .setRecognizeInputType(RecognizeInputTypeInternal.fromString(recognizeOptions.getRecognizeInputType().toString())) .setInterruptCallMediaOperation(recognizeOptions.isInterruptCallMediaOperation()) .setPlayPrompt(playSourceInternal) .setRecognizeOptions(recognizeOptionsInternal) .setOperationContext(recognizeOptions.getOperationContext()); return 
contentsInternal.recognizeWithResponseAsync(callConnectionId, recognizeRequest, context); } else if (recognizeOptions instanceof CallMediaRecognizeChoiceOptions) { CallMediaRecognizeChoiceOptions choiceRecognizeOptions = (CallMediaRecognizeChoiceOptions) recognizeOptions; RecognizeOptionsInternal recognizeOptionsInternal = new RecognizeOptionsInternal() .setChoices(translateRecognizeChoiceListtoInternal(choiceRecognizeOptions.getRecognizeChoices())) .setInterruptPrompt(choiceRecognizeOptions.isInterruptPrompt()) .setTargetParticipant(CommunicationIdentifierConverter.convert(choiceRecognizeOptions.getTargetParticipant())); recognizeOptionsInternal.setInitialSilenceTimeoutInSeconds((int) choiceRecognizeOptions.getInitialSilenceTimeout().getSeconds()); PlaySourceInternal playSourceInternal = null; if (recognizeOptions.getPlayPrompt() != null) { PlaySource playSource = recognizeOptions.getPlayPrompt(); playSourceInternal = translatePlaySourceToPlaySourceInternal(playSource); } RecognizeRequest recognizeRequest = new RecognizeRequest() .setRecognizeInputType(RecognizeInputTypeInternal.fromString(choiceRecognizeOptions.getRecognizeInputType().toString())) .setInterruptCallMediaOperation(choiceRecognizeOptions.isInterruptCallMediaOperation()) .setPlayPrompt(playSourceInternal) .setRecognizeOptions(recognizeOptionsInternal) .setOperationContext(recognizeOptions.getOperationContext()); return contentsInternal.recognizeWithResponseAsync(callConnectionId, recognizeRequest, context); } else { return monoError(logger, new UnsupportedOperationException(recognizeOptions.getClass().getName())); } } catch (RuntimeException e) { return monoError(logger, e); } }
.map(e -> translateDtmfToneToInternal(e))
new DtmfOptionsInternal(); dtmfOptionsInternal.setInterToneTimeoutInSeconds((int) dtmfRecognizeOptions.getInterToneTimeout().getSeconds()); if (dtmfRecognizeOptions.getMaxTonesToCollect() != null) { dtmfOptionsInternal.setMaxTonesToCollect(dtmfRecognizeOptions.getMaxTonesToCollect()); }
class CallMediaAsync { private final CallMediasImpl contentsInternal; private final String callConnectionId; private final ClientLogger logger; CallMediaAsync(String callConnectionId, CallMediasImpl contentsInternal) { this.callConnectionId = callConnectionId; this.contentsInternal = contentsInternal; this.logger = new ClientLogger(CallMediaAsync.class); } /** * Play * * @param playSource A {@link PlaySource} representing the source to play. * @param playTo the targets to play to * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Void for successful play request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> play(PlaySource playSource, List<CommunicationIdentifier> playTo) { return playWithResponse(playSource, playTo, null).flatMap(FluxUtil::toMono); } /** * Play to all participants * * @param playSource A {@link PlaySource} representing the source to play. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Void for successful playAll request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> playToAll(PlaySource playSource) { return playToAllWithResponse(playSource, null).flatMap(FluxUtil::toMono); } /** * Play * * @param playSource A {@link PlaySource} representing the source to play. * @param playTo the targets to play to * @param options play options. * @return Response for successful play request. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> playWithResponse(PlaySource playSource, List<CommunicationIdentifier> playTo, PlayOptions options) { return playWithResponseInternal(playSource, playTo, options, null); } /** * Play to all participants * * @param playSource A {@link PlaySource} representing the source to play. * @param options play options. * @return Response for successful playAll request. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> playToAllWithResponse(PlaySource playSource, PlayOptions options) { return playWithResponseInternal(playSource, Collections.emptyList(), options, null); } /** * Recognize operation. * @param recognizeOptions Different attributes for recognize. * @return Response for successful recognize request. */ public Mono<Void> startRecognizing(CallMediaRecognizeOptions recognizeOptions) { return startRecognizingWithResponse(recognizeOptions).then(); } /** * Recognize operation * @param recognizeOptions Different attributes for recognize. * @return Response for successful recognize request. */ public Mono<Response<Void>> startRecognizingWithResponse(CallMediaRecognizeOptions recognizeOptions) { return withContext(context -> recognizeWithResponseInternal(recognizeOptions, context)); } Mono<Response<Void>> recognizeWithResponseInternal(CallMediaRecognizeOptions recognizeOptions, Context context) { try { context = context == null ? 
Context.NONE : context; if (recognizeOptions instanceof CallMediaRecognizeDtmfOptions) { CallMediaRecognizeDtmfOptions dtmfRecognizeOptions = (CallMediaRecognizeDtmfOptions) recognizeOptions; DtmfOptionsInternal dtmfOptionsInternal = if (dtmfRecognizeOptions.getStopTones() != null) { List<DtmfTone> dtmfTones = dtmfRecognizeOptions.getStopTones().stream() .map(e -> translateDtmfToneToInternal(e)) .collect(Collectors.toList()); dtmfOptionsInternal.setStopTones(dtmfTones); } RecognizeOptionsInternal recognizeOptionsInternal = new RecognizeOptionsInternal() .setDtmfOptions(dtmfOptionsInternal) .setInterruptPrompt(recognizeOptions.isInterruptPrompt()) .setTargetParticipant(CommunicationIdentifierConverter.convert(recognizeOptions.getTargetParticipant())); recognizeOptionsInternal.setInitialSilenceTimeoutInSeconds((int) recognizeOptions.getInitialSilenceTimeout().getSeconds()); PlaySourceInternal playSourceInternal = null; if (recognizeOptions.getPlayPrompt() != null) { PlaySource playSource = recognizeOptions.getPlayPrompt(); playSourceInternal = translatePlaySourceToPlaySourceInternal(playSource); } RecognizeRequest recognizeRequest = new RecognizeRequest() .setRecognizeInputType(RecognizeInputTypeInternal.fromString(recognizeOptions.getRecognizeInputType().toString())) .setInterruptCallMediaOperation(recognizeOptions.isInterruptCallMediaOperation()) .setPlayPrompt(playSourceInternal) .setRecognizeOptions(recognizeOptionsInternal) .setOperationContext(recognizeOptions.getOperationContext()); return contentsInternal.recognizeWithResponseAsync(callConnectionId, recognizeRequest, context); } else if (recognizeOptions instanceof CallMediaRecognizeChoiceOptions) { CallMediaRecognizeChoiceOptions choiceRecognizeOptions = (CallMediaRecognizeChoiceOptions) recognizeOptions; RecognizeOptionsInternal recognizeOptionsInternal = new RecognizeOptionsInternal() .setChoices(translateRecognizeChoiceListtoInternal(choiceRecognizeOptions.getRecognizeChoices())) 
.setInterruptPrompt(choiceRecognizeOptions.isInterruptPrompt()) .setTargetParticipant(CommunicationIdentifierConverter.convert(choiceRecognizeOptions.getTargetParticipant())); recognizeOptionsInternal.setInitialSilenceTimeoutInSeconds((int) choiceRecognizeOptions.getInitialSilenceTimeout().getSeconds()); PlaySourceInternal playSourceInternal = null; if (recognizeOptions.getPlayPrompt() != null) { PlaySource playSource = recognizeOptions.getPlayPrompt(); playSourceInternal = translatePlaySourceToPlaySourceInternal(playSource); } RecognizeRequest recognizeRequest = new RecognizeRequest() .setRecognizeInputType(RecognizeInputTypeInternal.fromString(choiceRecognizeOptions.getRecognizeInputType().toString())) .setInterruptCallMediaOperation(choiceRecognizeOptions.isInterruptCallMediaOperation()) .setPlayPrompt(playSourceInternal) .setRecognizeOptions(recognizeOptionsInternal) .setOperationContext(recognizeOptions.getOperationContext()); return contentsInternal.recognizeWithResponseAsync(callConnectionId, recognizeRequest, context); } else { return monoError(logger, new UnsupportedOperationException(recognizeOptions.getClass().getName())); } } catch (RuntimeException e) { return monoError(logger, e); } } /** * Cancels all the queued media operations. * @return Void */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> cancelAllMediaOperations() { return cancelAllMediaOperationsWithResponse().then(); } /** * Cancels all the queued media operations * @return Response for successful playAll request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> cancelAllMediaOperationsWithResponse() { return cancelAllMediaOperationsWithResponseInternal(null); } Mono<Response<Void>> cancelAllMediaOperationsWithResponseInternal(Context context) { try { return withContext(contextValue -> { contextValue = context == null ? 
contextValue : context; return contentsInternal.cancelAllMediaOperationsWithResponseAsync(callConnectionId, contextValue) .onErrorMap(HttpResponseException.class, ErrorConstructorProxy::create); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> playWithResponseInternal(PlaySource playSource, List<CommunicationIdentifier> playTo, PlayOptions options, Context context) { try { return withContext(contextValue -> { contextValue = context == null ? contextValue : context; PlayRequest request = getPlayRequest(playSource, playTo, options); return contentsInternal.playWithResponseAsync(callConnectionId, request, contextValue) .onErrorMap(HttpResponseException.class, ErrorConstructorProxy::create); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } PlayRequest getPlayRequest(PlaySource playSource, List<CommunicationIdentifier> playTo, PlayOptions options) { PlaySourceInternal playSourceInternal = new PlaySourceInternal(); if (playSource instanceof FileSource) { playSourceInternal = getPlaySourceInternalFromFileSource((FileSource) playSource); } else if (playSource instanceof TextSource) { playSourceInternal = getPlaySourceInternalFromTextSource((TextSource) playSource); } if (playSourceInternal.getSourceType() != null) { PlayRequest request = new PlayRequest() .setPlaySourceInfo(playSourceInternal) .setPlayTo( playTo .stream() .map(CommunicationIdentifierConverter::convert) .collect(Collectors.toList())); if (options != null) { request.setPlayOptions(new PlayOptionsInternal().setLoop(options.isLoop())); request.setOperationContext(options.getOperationContext()); } return request; } throw logger.logExceptionAsError(new IllegalArgumentException(playSource.getClass().getCanonicalName())); } private PlaySourceInternal getPlaySourceInternalFromFileSource(FileSource playSource) { FileSourceInternal fileSourceInternal = new FileSourceInternal().setUri(playSource.getUri()); PlaySourceInternal playSourceInternal = new 
PlaySourceInternal() .setSourceType(PlaySourceTypeInternal.FILE) .setFileSource(fileSourceInternal) .setPlaySourceId(playSource.getPlaySourceId()); return playSourceInternal; } private PlaySourceInternal getPlaySourceInternalFromTextSource(TextSource playSource) { TextSourceInternal textSourceInternal = new TextSourceInternal().setText(playSource.getText()); if (playSource.getVoiceGender() != null) { textSourceInternal.setVoiceGender(GenderType.fromString(playSource.getVoiceGender().toString())); } if (playSource.getSourceLocale() != null) { textSourceInternal.setSourceLocale(playSource.getSourceLocale()); } if (playSource.getVoiceName() != null) { textSourceInternal.setVoiceName(playSource.getVoiceName()); } PlaySourceInternal playSourceInternal = new PlaySourceInternal() .setSourceType(PlaySourceTypeInternal.TEXT) .setTextSource(textSourceInternal) .setPlaySourceId(playSource.getPlaySourceId()); return playSourceInternal; } private PlaySourceInternal translatePlaySourceToPlaySourceInternal(PlaySource playSource) { PlaySourceInternal playSourceInternal = new PlaySourceInternal(); if (playSource instanceof FileSource) { playSourceInternal = getPlaySourceInternalFromFileSource((FileSource) playSource); } else if (playSource instanceof TextSource) { playSourceInternal = getPlaySourceInternalFromTextSource((TextSource) playSource); } return playSourceInternal; } private List<RecognizeChoice> translateRecognizeChoiceListtoInternal(List<com.azure.communication.callautomation.models.RecognizeChoice> recognizeChoices) { return recognizeChoices.stream() .map(e -> translateRecognizeChoiceToInternal(e)) .collect(Collectors.toList()); } private RecognizeChoice translateRecognizeChoiceToInternal(com.azure.communication.callautomation.models.RecognizeChoice recognizeChoice) { RecognizeChoice internalRecognizeChoice = new RecognizeChoice(); if (recognizeChoice.getLabel() != null) { internalRecognizeChoice.setLabel(recognizeChoice.getLabel()); } if (recognizeChoice.getPhrases() 
!= null) { internalRecognizeChoice.setPhrases(recognizeChoice.getPhrases()); } if (recognizeChoice.getTone() != null) { internalRecognizeChoice.setTone(translateDtmfToneToInternal(recognizeChoice.getTone())); } return internalRecognizeChoice; } private DtmfTone translateDtmfToneToInternal(com.azure.communication.callautomation.models.DtmfTone dtmfTone) { return DtmfTone.fromString(dtmfTone.toString()); } }
class CallMediaAsync { private final CallMediasImpl contentsInternal; private final String callConnectionId; private final ClientLogger logger; CallMediaAsync(String callConnectionId, CallMediasImpl contentsInternal) { this.callConnectionId = callConnectionId; this.contentsInternal = contentsInternal; this.logger = new ClientLogger(CallMediaAsync.class); } /** * Play * * @param playSource A {@link PlaySource} representing the source to play. * @param playTo the targets to play to * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Void for successful play request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> play(PlaySource playSource, List<CommunicationIdentifier> playTo) { return playWithResponse(playSource, playTo, null).flatMap(FluxUtil::toMono); } /** * Play to all participants * * @param playSource A {@link PlaySource} representing the source to play. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return Void for successful playAll request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> playToAll(PlaySource playSource) { return playToAllWithResponse(playSource, null).flatMap(FluxUtil::toMono); } /** * Play * * @param playSource A {@link PlaySource} representing the source to play. * @param playTo the targets to play to * @param options play options. * @return Response for successful play request. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> playWithResponse(PlaySource playSource, List<CommunicationIdentifier> playTo, PlayOptions options) { return playWithResponseInternal(playSource, playTo, options, null); } /** * Play to all participants * * @param playSource A {@link PlaySource} representing the source to play. * @param options play options. * @return Response for successful playAll request. * @throws CallingServerErrorException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> playToAllWithResponse(PlaySource playSource, PlayOptions options) { return playWithResponseInternal(playSource, Collections.emptyList(), options, null); } /** * Recognize operation. * @param recognizeOptions Different attributes for recognize. * @return Response for successful recognize request. */ public Mono<Void> startRecognizing(CallMediaRecognizeOptions recognizeOptions) { return startRecognizingWithResponse(recognizeOptions).then(); } /** * Recognize operation * @param recognizeOptions Different attributes for recognize. * @return Response for successful recognize request. */ public Mono<Response<Void>> startRecognizingWithResponse(CallMediaRecognizeOptions recognizeOptions) { return withContext(context -> recognizeWithResponseInternal(recognizeOptions, context)); } Mono<Response<Void>> recognizeWithResponseInternal(CallMediaRecognizeOptions recognizeOptions, Context context) { try { context = context == null ? 
Context.NONE : context; if (recognizeOptions instanceof CallMediaRecognizeDtmfOptions) { CallMediaRecognizeDtmfOptions dtmfRecognizeOptions = (CallMediaRecognizeDtmfOptions) recognizeOptions; DtmfOptionsInternal dtmfOptionsInternal = if (dtmfRecognizeOptions.getStopTones() != null) { List<DtmfToneInternal> dtmfTones = dtmfRecognizeOptions.getStopTones().stream() .map(this::translateDtmfToneInternal) .collect(Collectors.toList()); dtmfOptionsInternal.setStopTones(dtmfTones); } RecognizeOptionsInternal recognizeOptionsInternal = new RecognizeOptionsInternal() .setDtmfOptions(dtmfOptionsInternal) .setInterruptPrompt(recognizeOptions.isInterruptPrompt()) .setTargetParticipant(CommunicationIdentifierConverter.convert(recognizeOptions.getTargetParticipant())); recognizeOptionsInternal.setInitialSilenceTimeoutInSeconds((int) recognizeOptions.getInitialSilenceTimeout().getSeconds()); PlaySourceInternal playSourceInternal = null; if (recognizeOptions.getPlayPrompt() != null) { PlaySource playSource = recognizeOptions.getPlayPrompt(); playSourceInternal = translatePlaySourceToPlaySourceInternal(playSource); } RecognizeRequest recognizeRequest = new RecognizeRequest() .setRecognizeInputType(RecognizeInputTypeInternal.fromString(recognizeOptions.getRecognizeInputType().toString())) .setInterruptCallMediaOperation(recognizeOptions.isInterruptCallMediaOperation()) .setPlayPrompt(playSourceInternal) .setRecognizeOptions(recognizeOptionsInternal) .setOperationContext(recognizeOptions.getOperationContext()); return contentsInternal.recognizeWithResponseAsync(callConnectionId, recognizeRequest, context); } else if (recognizeOptions instanceof CallMediaRecognizeChoiceOptions) { CallMediaRecognizeChoiceOptions choiceRecognizeOptions = (CallMediaRecognizeChoiceOptions) recognizeOptions; RecognizeOptionsInternal recognizeOptionsInternal = new RecognizeOptionsInternal() .setChoices(translateListRecognizeChoiceInternal(choiceRecognizeOptions.getRecognizeChoices())) 
.setInterruptPrompt(choiceRecognizeOptions.isInterruptPrompt()) .setTargetParticipant(CommunicationIdentifierConverter.convert(choiceRecognizeOptions.getTargetParticipant())); recognizeOptionsInternal.setInitialSilenceTimeoutInSeconds((int) choiceRecognizeOptions.getInitialSilenceTimeout().getSeconds()); PlaySourceInternal playSourceInternal = null; if (recognizeOptions.getPlayPrompt() != null) { PlaySource playSource = recognizeOptions.getPlayPrompt(); playSourceInternal = translatePlaySourceToPlaySourceInternal(playSource); } RecognizeRequest recognizeRequest = new RecognizeRequest() .setRecognizeInputType(RecognizeInputTypeInternal.fromString(choiceRecognizeOptions.getRecognizeInputType().toString())) .setInterruptCallMediaOperation(choiceRecognizeOptions.isInterruptCallMediaOperation()) .setPlayPrompt(playSourceInternal) .setRecognizeOptions(recognizeOptionsInternal) .setOperationContext(recognizeOptions.getOperationContext()); return contentsInternal.recognizeWithResponseAsync(callConnectionId, recognizeRequest, context); } else { return monoError(logger, new UnsupportedOperationException(recognizeOptions.getClass().getName())); } } catch (RuntimeException e) { return monoError(logger, e); } } /** * Cancels all the queued media operations. * @return Void */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> cancelAllMediaOperations() { return cancelAllMediaOperationsWithResponse().then(); } /** * Cancels all the queued media operations * @return Response for successful playAll request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> cancelAllMediaOperationsWithResponse() { return cancelAllMediaOperationsWithResponseInternal(null); } Mono<Response<Void>> cancelAllMediaOperationsWithResponseInternal(Context context) { try { return withContext(contextValue -> { contextValue = context == null ? 
contextValue : context; return contentsInternal.cancelAllMediaOperationsWithResponseAsync(callConnectionId, contextValue) .onErrorMap(HttpResponseException.class, ErrorConstructorProxy::create); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> playWithResponseInternal(PlaySource playSource, List<CommunicationIdentifier> playTo, PlayOptions options, Context context) { try { return withContext(contextValue -> { contextValue = context == null ? contextValue : context; PlayRequest request = getPlayRequest(playSource, playTo, options); return contentsInternal.playWithResponseAsync(callConnectionId, request, contextValue) .onErrorMap(HttpResponseException.class, ErrorConstructorProxy::create); }); } catch (RuntimeException ex) { return monoError(logger, ex); } } PlayRequest getPlayRequest(PlaySource playSource, List<CommunicationIdentifier> playTo, PlayOptions options) { PlaySourceInternal playSourceInternal = new PlaySourceInternal(); if (playSource instanceof FileSource) { playSourceInternal = getPlaySourceInternalFromFileSource((FileSource) playSource); } else if (playSource instanceof TextSource) { playSourceInternal = getPlaySourceInternalFromTextSource((TextSource) playSource); } if (playSourceInternal.getSourceType() != null) { PlayRequest request = new PlayRequest() .setPlaySourceInfo(playSourceInternal) .setPlayTo( playTo .stream() .map(CommunicationIdentifierConverter::convert) .collect(Collectors.toList())); if (options != null) { request.setPlayOptions(new PlayOptionsInternal().setLoop(options.isLoop())); request.setOperationContext(options.getOperationContext()); } return request; } throw logger.logExceptionAsError(new IllegalArgumentException(playSource.getClass().getCanonicalName())); } private PlaySourceInternal getPlaySourceInternalFromFileSource(FileSource playSource) { FileSourceInternal fileSourceInternal = new FileSourceInternal().setUri(playSource.getUri()); PlaySourceInternal playSourceInternal = new 
PlaySourceInternal() .setSourceType(PlaySourceTypeInternal.FILE) .setFileSource(fileSourceInternal) .setPlaySourceId(playSource.getPlaySourceId()); return playSourceInternal; } private PlaySourceInternal getPlaySourceInternalFromTextSource(TextSource playSource) { TextSourceInternal textSourceInternal = new TextSourceInternal().setText(playSource.getText()); if (playSource.getVoiceGender() != null) { textSourceInternal.setVoiceGender(GenderTypeInternal.fromString(playSource.getVoiceGender().toString())); } if (playSource.getSourceLocale() != null) { textSourceInternal.setSourceLocale(playSource.getSourceLocale()); } if (playSource.getVoiceName() != null) { textSourceInternal.setVoiceName(playSource.getVoiceName()); } PlaySourceInternal playSourceInternal = new PlaySourceInternal() .setSourceType(PlaySourceTypeInternal.TEXT) .setTextSource(textSourceInternal) .setPlaySourceId(playSource.getPlaySourceId()); return playSourceInternal; } private PlaySourceInternal translatePlaySourceToPlaySourceInternal(PlaySource playSource) { PlaySourceInternal playSourceInternal = new PlaySourceInternal(); if (playSource instanceof FileSource) { playSourceInternal = getPlaySourceInternalFromFileSource((FileSource) playSource); } else if (playSource instanceof TextSource) { playSourceInternal = getPlaySourceInternalFromTextSource((TextSource) playSource); } return playSourceInternal; } private List<RecognizeChoiceInternal> translateListRecognizeChoiceInternal(List<RecognizeChoice> recognizeChoices) { return recognizeChoices.stream() .map(this::translateRecognizeChoiceInternal) .collect(Collectors.toList()); } private RecognizeChoiceInternal translateRecognizeChoiceInternal(RecognizeChoice recognizeChoice) { RecognizeChoiceInternal internalRecognizeChoice = new RecognizeChoiceInternal(); if (recognizeChoice.getLabel() != null) { internalRecognizeChoice.setLabel(recognizeChoice.getLabel()); } if (recognizeChoice.getPhrases() != null) { 
internalRecognizeChoice.setPhrases(recognizeChoice.getPhrases()); } if (recognizeChoice.getTone() != null) { internalRecognizeChoice.setTone(translateDtmfToneInternal(recognizeChoice.getTone())); } return internalRecognizeChoice; } private DtmfToneInternal translateDtmfToneInternal(DtmfTone dtmfTone) { return DtmfToneInternal.fromString(dtmfTone.toString()); } }
Are we missing recording into the requests timer and the responses timer here?
public void stop() { this.lifetime.stop(); }
}
public void stop() { this.lifetime.stop(); }
class RntbdRequestArgs { private static final AtomicLong instanceCount = new AtomicLong(); private final Sample sample; private final UUID activityId; private final Instant timeCreated; private final long nanoTimeCreated; private final Stopwatch lifetime; private final String origin; private final Uri physicalAddressUri; private final String replicaPath; private final RxDocumentServiceRequest serviceRequest; private final long transportRequestId; public RntbdRequestArgs(final RxDocumentServiceRequest serviceRequest, final Uri physicalAddressUri) { this.sample = Timer.start(); this.activityId = serviceRequest.getActivityId(); this.timeCreated = Instant.now(); this.nanoTimeCreated = System.nanoTime(); this.lifetime = Stopwatch.createStarted(); this.origin = physicalAddressUri.getURI().getScheme() + ": this.physicalAddressUri = physicalAddressUri; this.replicaPath = StringUtils.stripEnd(physicalAddressUri.getURI().getPath(), "/"); this.serviceRequest = serviceRequest; this.transportRequestId = instanceCount.incrementAndGet(); } @JsonProperty public UUID activityId() { return this.activityId; } @JsonProperty public Duration lifetime() { return this.lifetime.elapsed(); } @JsonIgnore public long nanoTimeCreated() { return this.nanoTimeCreated; } @JsonProperty public String origin() { return this.origin; } @JsonIgnore public Uri physicalAddressUri() { return this.physicalAddressUri; } @JsonProperty public String replicaPath() { return this.replicaPath; } @JsonIgnore public RxDocumentServiceRequest serviceRequest() { return this.serviceRequest; } @JsonProperty public Instant timeCreated() { return this.timeCreated; } @JsonProperty public long transportRequestId() { return this.transportRequestId; } public long stop(Timer requests, Timer responses) { this.lifetime.stop(); return this.lifetime.elapsed(TimeUnit.MILLISECONDS); } @Override public String toString() { return RntbdObjectMapper.toString(this); } public void traceOperation( final Logger logger, final 
ChannelHandlerContext context, final String operationName, final Object... args) { checkNotNull(logger, "expected non-null logger"); if (logger.isDebugEnabled()) { logger.debug("{},{},\"{}({})\",\"{}\",\"{}\"", this.timeCreated, this.lifetime.elapsed(), operationName, Stream.of(args) .map(arg -> arg == null ? "null" : arg.toString()) .collect(Collectors.joining(",")), this, context); } } }
class RntbdRequestArgs { private static final AtomicLong instanceCount = new AtomicLong(); private final Sample sample; private final UUID activityId; private final Instant timeCreated; private final long nanoTimeCreated; private final Stopwatch lifetime; private final String origin; private final Uri physicalAddressUri; private final String replicaPath; private final RxDocumentServiceRequest serviceRequest; private final long transportRequestId; public RntbdRequestArgs(final RxDocumentServiceRequest serviceRequest, final Uri physicalAddressUri) { this.sample = Timer.start(); this.activityId = serviceRequest.getActivityId(); this.timeCreated = Instant.now(); this.nanoTimeCreated = System.nanoTime(); this.lifetime = Stopwatch.createStarted(); this.origin = physicalAddressUri.getURI().getScheme() + ": this.physicalAddressUri = physicalAddressUri; this.replicaPath = StringUtils.stripEnd(physicalAddressUri.getURI().getPath(), "/"); this.serviceRequest = serviceRequest; this.transportRequestId = instanceCount.incrementAndGet(); } @JsonProperty public UUID activityId() { return this.activityId; } @JsonProperty public Duration lifetime() { return this.lifetime.elapsed(); } @JsonIgnore public long nanoTimeCreated() { return this.nanoTimeCreated; } @JsonProperty public String origin() { return this.origin; } @JsonIgnore public Uri physicalAddressUri() { return this.physicalAddressUri; } @JsonProperty public String replicaPath() { return this.replicaPath; } @JsonIgnore public RxDocumentServiceRequest serviceRequest() { return this.serviceRequest; } @JsonProperty public Instant timeCreated() { return this.timeCreated; } @JsonProperty public long transportRequestId() { return this.transportRequestId; } public void stop(Timer requests, Timer responses) { this.lifetime.stop(); if (requests != null) { this.sample.stop(requests); } if (responses != null) { this.sample.stop(responses); } } @Override public String toString() { return RntbdObjectMapper.toString(this); } public void 
traceOperation( final Logger logger, final ChannelHandlerContext context, final String operationName, final Object... args) { checkNotNull(logger, "expected non-null logger"); if (logger.isDebugEnabled()) { logger.debug("{},{},\"{}({})\",\"{}\",\"{}\"", this.timeCreated, this.lifetime.elapsed(), operationName, Stream.of(args) .map(arg -> arg == null ? "null" : arg.toString()) .collect(Collectors.joining(",")), this, context); } } }
Yeah - left-over change
public void stop() { this.lifetime.stop(); }
}
public void stop() { this.lifetime.stop(); }
class RntbdRequestArgs { private static final AtomicLong instanceCount = new AtomicLong(); private final Sample sample; private final UUID activityId; private final Instant timeCreated; private final long nanoTimeCreated; private final Stopwatch lifetime; private final String origin; private final Uri physicalAddressUri; private final String replicaPath; private final RxDocumentServiceRequest serviceRequest; private final long transportRequestId; public RntbdRequestArgs(final RxDocumentServiceRequest serviceRequest, final Uri physicalAddressUri) { this.sample = Timer.start(); this.activityId = serviceRequest.getActivityId(); this.timeCreated = Instant.now(); this.nanoTimeCreated = System.nanoTime(); this.lifetime = Stopwatch.createStarted(); this.origin = physicalAddressUri.getURI().getScheme() + ": this.physicalAddressUri = physicalAddressUri; this.replicaPath = StringUtils.stripEnd(physicalAddressUri.getURI().getPath(), "/"); this.serviceRequest = serviceRequest; this.transportRequestId = instanceCount.incrementAndGet(); } @JsonProperty public UUID activityId() { return this.activityId; } @JsonProperty public Duration lifetime() { return this.lifetime.elapsed(); } @JsonIgnore public long nanoTimeCreated() { return this.nanoTimeCreated; } @JsonProperty public String origin() { return this.origin; } @JsonIgnore public Uri physicalAddressUri() { return this.physicalAddressUri; } @JsonProperty public String replicaPath() { return this.replicaPath; } @JsonIgnore public RxDocumentServiceRequest serviceRequest() { return this.serviceRequest; } @JsonProperty public Instant timeCreated() { return this.timeCreated; } @JsonProperty public long transportRequestId() { return this.transportRequestId; } public long stop(Timer requests, Timer responses) { this.lifetime.stop(); return this.lifetime.elapsed(TimeUnit.MILLISECONDS); } @Override public String toString() { return RntbdObjectMapper.toString(this); } public void traceOperation( final Logger logger, final 
ChannelHandlerContext context, final String operationName, final Object... args) { checkNotNull(logger, "expected non-null logger"); if (logger.isDebugEnabled()) { logger.debug("{},{},\"{}({})\",\"{}\",\"{}\"", this.timeCreated, this.lifetime.elapsed(), operationName, Stream.of(args) .map(arg -> arg == null ? "null" : arg.toString()) .collect(Collectors.joining(",")), this, context); } } }
class RntbdRequestArgs { private static final AtomicLong instanceCount = new AtomicLong(); private final Sample sample; private final UUID activityId; private final Instant timeCreated; private final long nanoTimeCreated; private final Stopwatch lifetime; private final String origin; private final Uri physicalAddressUri; private final String replicaPath; private final RxDocumentServiceRequest serviceRequest; private final long transportRequestId; public RntbdRequestArgs(final RxDocumentServiceRequest serviceRequest, final Uri physicalAddressUri) { this.sample = Timer.start(); this.activityId = serviceRequest.getActivityId(); this.timeCreated = Instant.now(); this.nanoTimeCreated = System.nanoTime(); this.lifetime = Stopwatch.createStarted(); this.origin = physicalAddressUri.getURI().getScheme() + ": this.physicalAddressUri = physicalAddressUri; this.replicaPath = StringUtils.stripEnd(physicalAddressUri.getURI().getPath(), "/"); this.serviceRequest = serviceRequest; this.transportRequestId = instanceCount.incrementAndGet(); } @JsonProperty public UUID activityId() { return this.activityId; } @JsonProperty public Duration lifetime() { return this.lifetime.elapsed(); } @JsonIgnore public long nanoTimeCreated() { return this.nanoTimeCreated; } @JsonProperty public String origin() { return this.origin; } @JsonIgnore public Uri physicalAddressUri() { return this.physicalAddressUri; } @JsonProperty public String replicaPath() { return this.replicaPath; } @JsonIgnore public RxDocumentServiceRequest serviceRequest() { return this.serviceRequest; } @JsonProperty public Instant timeCreated() { return this.timeCreated; } @JsonProperty public long transportRequestId() { return this.transportRequestId; } public void stop(Timer requests, Timer responses) { this.lifetime.stop(); if (requests != null) { this.sample.stop(requests); } if (responses != null) { this.sample.stop(responses); } } @Override public String toString() { return RntbdObjectMapper.toString(this); } public void 
traceOperation( final Logger logger, final ChannelHandlerContext context, final String operationName, final Object... args) { checkNotNull(logger, "expected non-null logger"); if (logger.isDebugEnabled()) { logger.debug("{},{},\"{}({})\",\"{}\",\"{}\"", this.timeCreated, this.lifetime.elapsed(), operationName, Stream.of(args) .map(arg -> arg == null ? "null" : arg.toString()) .collect(Collectors.joining(",")), this, context); } } }
Fixed
private void doAcquireChannel(final ChannelPromiseWithExpiryTime promise, final Channel candidate) { this.ensureInEventLoop(); acquiredChannels.put(candidate, candidate); totalAcquiredChannels.incrementAndGet(); final ChannelPromiseWithExpiryTime anotherPromise = this.newChannelPromiseForAvailableChannel(promise, candidate); final EventLoop loop = candidate.eventLoop(); if (loop.inEventLoop()) { this.doChannelHealthCheck(candidate, anotherPromise); } else { loop.execute(() -> this.doChannelHealthCheck(candidate, anotherPromise)); } }
totalAcquiredChannels.incrementAndGet();
private void doAcquireChannel(final ChannelPromiseWithExpiryTime promise, final Channel candidate) { this.ensureInEventLoop(); acquiredChannels.put(candidate, candidate); final ChannelPromiseWithExpiryTime anotherPromise = this.newChannelPromiseForAvailableChannel(promise, candidate); final EventLoop loop = candidate.eventLoop(); if (loop.inEventLoop()) { this.doChannelHealthCheck(candidate, anotherPromise); } else { loop.execute(() -> this.doChannelHealthCheck(candidate, anotherPromise)); } }
class and should be pulled up to RntbdServiceEndpoint or this.acquisitionTimeoutInNanos = config.connectionAcquisitionTimeoutInNanos(); this.allocatorMetric = config.allocator().metric(); this.maxChannels = config.maxChannelsPerEndpoint(); this.maxRequestsPerChannel = config.maxRequestsPerChannel(); this.maxPendingAcquisitions = Integer.MAX_VALUE; this.releaseHealthCheck = true; this.acquisitionTimeoutTask = acquisitionTimeoutInNanos <= 0 ? null : new AcquireTimeoutTask(this) { /** * Fails a request due to a channel acquisition timeout. * * @param task a {@link AcquireListener channel acquisition task} that has timed out. */ @Override public void onTimeout(AcquireListener task) { task.originalPromise.setFailure(ACQUISITION_TIMEOUT); RntbdChannelAcquisitionTimeline.startNewEvent( task.originalPromise.getChannelAcquisitionTimeline(), RntbdChannelAcquisitionEventType.PENDING_TIME_OUT, clientTelemetry); } }
class and should be pulled up to RntbdServiceEndpoint or this.acquisitionTimeoutInNanos = config.connectionAcquisitionTimeoutInNanos(); this.allocatorMetric = config.allocator().metric(); this.maxChannels = config.maxChannelsPerEndpoint(); this.maxRequestsPerChannel = config.maxRequestsPerChannel(); this.maxPendingAcquisitions = Integer.MAX_VALUE; this.releaseHealthCheck = true; this.acquisitionTimeoutTask = acquisitionTimeoutInNanos <= 0 ? null : new AcquireTimeoutTask(this) { /** * Fails a request due to a channel acquisition timeout. * * @param task a {@link AcquireListener channel acquisition task} that has timed out. */ @Override public void onTimeout(AcquireListener task) { task.originalPromise.setFailure(ACQUISITION_TIMEOUT); RntbdChannelAcquisitionTimeline.startNewEvent( task.originalPromise.getChannelAcquisitionTimeline(), RntbdChannelAcquisitionEventType.PENDING_TIME_OUT, clientTelemetry); } }
Since these are created in the constructor, how it will react to the dynamic option modification? (for example, when I remove or add a new metrics category, will it reevaluate requests timer etc?)
private RntbdMetricsV2(MeterRegistry registry, RntbdTransportClient client, RntbdEndpoint endpoint) { Tags tags = Tags.of(endpoint.clientMetricTag()); this.metricCategories = client.getMetricCategories(); if (metricCategories.contains(MetricCategory.DirectRequests)) { CosmosMeterOptions options = client .getMeterOptions(CosmosMeterName.DIRECT_REQUEST_LATENCY); if (options.isEnabled()) { this.requests = Timer .builder(options.getMeterName().toString()) .description("RNTBD request latency") .maximumExpectedValue(Duration.ofSeconds(300)) .publishPercentiles(optionsAccessor.getPercentiles(options)) .publishPercentileHistogram(optionsAccessor.isHistogramPublishingEnabled(options)) .tags(getEffectiveTags(tags, options)) .register(registry); } else { this.requests = null; } options = client .getMeterOptions(CosmosMeterName.DIRECT_REQUEST_LATENCY_FAILED); if (options.isEnabled()) { this.responseErrors = Timer .builder(options.getMeterName().toString()) .description("RNTBD failed request latency") .maximumExpectedValue(Duration.ofSeconds(300)) .publishPercentiles(optionsAccessor.getPercentiles(options)) .publishPercentileHistogram(optionsAccessor.isHistogramPublishingEnabled(options)) .tags(getEffectiveTags(tags, options)) .register(registry); } else { this.responseErrors = null; } options = client .getMeterOptions(CosmosMeterName.DIRECT_REQUEST_LATENCY_SUCCESS); if (options.isEnabled()) { this.responseSuccesses = Timer .builder(options.getMeterName().toString()) .description("RNTBD successful request latency") .maximumExpectedValue(Duration.ofSeconds(300)) .publishPercentiles(optionsAccessor.getPercentiles(options)) .publishPercentileHistogram(optionsAccessor.isHistogramPublishingEnabled(options)) .tags(getEffectiveTags(tags, options)) .register(registry); } else { this.responseSuccesses = null; } options = client .getMeterOptions(CosmosMeterName.DIRECT_REQUEST_CONCURRENT_COUNT); if (options.isEnabled()) { Gauge.builder(options.getMeterName().toString(), endpoint, 
RntbdEndpoint::concurrentRequests) .description("RNTBD concurrent requests (executing or queued request count)") .tags(getEffectiveTags(tags, options)) .register(registry); } options = client .getMeterOptions(CosmosMeterName.DIRECT_REQUEST_QUEUED_COUNT); if (options.isEnabled()) { Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::requestQueueLength) .description("RNTBD queued request count") .tags(getEffectiveTags(tags, options)) .register(registry); } options = client .getMeterOptions(CosmosMeterName.DIRECT_REQUEST_SIZE_REQUEST); if (options.isEnabled()) { this.requestSize = DistributionSummary.builder(options.getMeterName().toString()) .description("RNTBD request size (bytes)") .baseUnit("bytes") .tags(getEffectiveTags(tags, options)) .maximumExpectedValue(16_000_000d) .publishPercentileHistogram(false) .publishPercentiles() .register(registry); } else { this.requestSize = null; } options = client .getMeterOptions(CosmosMeterName.DIRECT_REQUEST_SIZE_RESPONSE); if (options.isEnabled()) { this.responseSize = DistributionSummary.builder(options.getMeterName().toString()) .description("RNTBD response size (bytes)") .baseUnit("bytes") .tags(getEffectiveTags(tags, options)) .maximumExpectedValue(16_000_000d) .publishPercentileHistogram(false) .publishPercentiles() .register(registry); } else { this.responseSize = null; } } else { this.requests = null; this.responseErrors = null; this.responseSuccesses = null; this.requestSize = null; this.responseSize= null; } if (metricCategories.contains(MetricCategory.DirectEndpoints)) { CosmosMeterOptions options = client .getMeterOptions(CosmosMeterName.DIRECT_ENDPOINTS_COUNT); if (options.isEnabled()) { Gauge.builder(options.getMeterName().toString(), client, RntbdTransportClient::endpointCount) .description("RNTBD endpoint count") .register(registry); } options = client .getMeterOptions(CosmosMeterName.DIRECT_ENDPOINTS_EVICTED); if (options.isEnabled()) { FunctionCounter.builder( 
options.getMeterName().toString(), client, RntbdTransportClient::endpointEvictionCount) .description("RNTBD endpoint eviction count") .register(registry); } } if (metricCategories.contains(MetricCategory.DirectChannels)) { CosmosMeterOptions options = client .getMeterOptions(CosmosMeterName.DIRECT_CHANNELS_ACQUIRED_COUNT); if (options.isEnabled()) { FunctionCounter.builder( options.getMeterName().toString(), endpoint, RntbdEndpoint::totalChannelsAcquiredMetric) .description("RNTBD acquired channel count") .tags(getEffectiveTags(tags, options)) .register(registry); } options = client .getMeterOptions(CosmosMeterName.DIRECT_CHANNELS_CLOSED_COUNT); if (options.isEnabled()) { FunctionCounter.builder( options.getMeterName().toString(), endpoint, RntbdEndpoint::totalChannelsClosedMetric) .description("RNTBD closed channel count") .tags(getEffectiveTags(tags, options)) .register(registry); } options = client .getMeterOptions(CosmosMeterName.DIRECT_CHANNELS_AVAILABLE_COUNT); if (options.isEnabled()) { Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::channelsAvailableMetric) .description("RNTBD available channel count") .tags(getEffectiveTags(tags, options)) .register(registry); } } }
this.requests = null;
private RntbdMetricsV2(MeterRegistry registry, RntbdTransportClient client, RntbdEndpoint endpoint) { this.tags = Tags.of(endpoint.clientMetricTag(), endpoint.tag()); this.client = client; this.registry = registry; if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) { CosmosMeterOptions options = client .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_CONCURRENT_COUNT); if (options.isEnabled()) { Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::concurrentRequests) .description("RNTBD concurrent requests (executing or queued request count)") .tags(getEffectiveTags(tags, options)) .register(registry); } options = client .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_QUEUED_COUNT); if (options.isEnabled()) { Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::requestQueueLength) .description("RNTBD queued request count") .tags(getEffectiveTags(tags, options)) .register(registry); } } if (this.client.getMetricCategories().contains(MetricCategory.DirectEndpoints)) { CosmosMeterOptions options = client .getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_COUNT); if (options.isEnabled()) { Gauge.builder(options.getMeterName().toString(), client, RntbdTransportClient::endpointCount) .description("RNTBD endpoint count") .register(registry); } options = client .getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_EVICTED); if (options.isEnabled()) { FunctionCounter.builder( options.getMeterName().toString(), client, RntbdTransportClient::endpointEvictionCount) .description("RNTBD endpoint eviction count") .register(registry); } } if (this.client.getMetricCategories().contains(MetricCategory.DirectChannels)) { CosmosMeterOptions options = client .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_ACQUIRED_COUNT); if (options.isEnabled()) { FunctionCounter.builder( options.getMeterName().toString(), endpoint, RntbdEndpoint::totalChannelsAcquiredMetric) .description("RNTBD acquired channel count") 
.tags(getEffectiveTags(tags, options)) .register(registry); } options = client .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_CLOSED_COUNT); if (options.isEnabled()) { FunctionCounter.builder( options.getMeterName().toString(), endpoint, RntbdEndpoint::totalChannelsClosedMetric) .description("RNTBD closed channel count") .tags(getEffectiveTags(tags, options)) .register(registry); } options = client .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_AVAILABLE_COUNT); if (options.isEnabled()) { Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::channelsAvailableMetric) .description("RNTBD available channel count") .tags(getEffectiveTags(tags, options)) .register(registry); } } }
class RntbdMetricsV2 implements RntbdMetricsCompletionRecorder { private final DistributionSummary requestSize; private final Timer requests; private final Timer responseErrors; private final DistributionSummary responseSize; private final Timer responseSuccesses; private final EnumSet<MetricCategory> metricCategories; public void markComplete(RntbdRequestRecord requestRecord) { if (this.metricCategories.contains(MetricCategory.DirectRequests)) { requestRecord.stop(this.requests, requestRecord.isCompletedExceptionally() ? this.responseErrors : this.responseSuccesses); if (this.requestSize != null) { this.requestSize.record(requestRecord.requestLength()); } if (this.responseSize != null) { this.responseSize.record(requestRecord.responseLength()); } } else { requestRecord.stop(); } } }
class RntbdMetricsV2 implements RntbdMetricsCompletionRecorder { private final RntbdTransportClient client; private final Tags tags; private final MeterRegistry registry; public void markComplete(RntbdRequestRecord requestRecord) { if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) { Timer requests = null; Timer requestsSuccess = null; Timer requestsFailed = null; CosmosMeterOptions options = this.client .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY); if (options.isEnabled()) { requests = Timer .builder(options.getMeterName().toString()) .description("RNTBD request latency") .maximumExpectedValue(Duration.ofSeconds(300)) .publishPercentiles(options.getPercentiles()) .publishPercentileHistogram(options.isHistogramPublishingEnabled()) .tags(getEffectiveTags(this.tags, options)) .register(this.registry); } options = client .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_FAILED); if (options.isEnabled()) { requestsFailed = Timer .builder(options.getMeterName().toString()) .description("RNTBD failed request latency") .maximumExpectedValue(Duration.ofSeconds(300)) .publishPercentiles(options.getPercentiles()) .publishPercentileHistogram(options.isHistogramPublishingEnabled()) .tags(getEffectiveTags(tags, options)) .register(registry); } options = client .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_SUCCESS); if (options.isEnabled()) { requestsSuccess = Timer .builder(options.getMeterName().toString()) .description("RNTBD successful request latency") .maximumExpectedValue(Duration.ofSeconds(300)) .publishPercentiles(options.getPercentiles()) .publishPercentileHistogram(options.isHistogramPublishingEnabled()) .tags(getEffectiveTags(tags, options)) .register(registry); } requestRecord.stop( requests, requestRecord.isCompletedExceptionally() ? 
requestsFailed : requestsSuccess); options = client .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_REQUEST); if (options.isEnabled()) { DistributionSummary requestSize = DistributionSummary.builder(options.getMeterName().toString()) .description("RNTBD request size (bytes)") .baseUnit("bytes") .tags(getEffectiveTags(tags, options)) .maximumExpectedValue(16_000_000d) .publishPercentileHistogram(false) .publishPercentiles() .register(registry); requestSize.record(requestRecord.requestLength()); } options = client .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_RESPONSE); if (options.isEnabled()) { DistributionSummary responseSize = DistributionSummary.builder(options.getMeterName().toString()) .description("RNTBD response size (bytes)") .baseUnit("bytes") .tags(getEffectiveTags(tags, options)) .maximumExpectedValue(16_000_000d) .publishPercentileHistogram(false) .publishPercentiles() .register(registry); responseSize.record(requestRecord.responseLength()); } } else { requestRecord.stop(); } } }
created an issue for service: https://github.com/Azure/azure-rest-api-specs/issues/22552
public void canCreateVMSSWithEphemeralOSDisk() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); Network network = this.networkManager .networks() .define("vmssvnet") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet uniformVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_DS1_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withUpgradeMode(UpgradeMode.MANUAL) .withEphemeralOSDisk() .withPlacement(DiffDiskPlacement.CACHE_DISK) .withCapacity(1) .create(); Assertions.assertTrue(uniformVMSS.isEphemeralOSDisk()); ResourceManagerUtils.sleep(Duration.ofMinutes(1)); uniformVMSS.virtualMachines().updateInstances( uniformVMSS.virtualMachines() .list() .stream() .map(VirtualMachineScaleSetVM::instanceId) .toArray(String[]::new)); Assertions.assertTrue(uniformVMSS.virtualMachines().list().stream().allMatch(VirtualMachineScaleSetVM::isLatestScaleSetUpdateApplied)); uniformVMSS.update() .withCapacity(2) .apply(); ResourceManagerUtils.sleep(Duration.ofMinutes(1)); Assertions.assertTrue(uniformVMSS.virtualMachines().list().stream().allMatch(VirtualMachineScaleSetVM::isLatestScaleSetUpdateApplied)); Network network2 = this .networkManager .networks() .define("vmssvnet2") .withRegion(region) .withExistingResourceGroup(rgName) .withAddressSpace("10.1.0.0/16") .withSubnet("subnet1", "10.1.0.0/16") .create(); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, this.resourceManager.resourceGroups().getByName(rgName), "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); final 
String vmssName1 = generateRandomResourceName("vmss", 10); VirtualMachineScaleSet flexVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName1) .withRegion(region) .withNewResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_DS1_V2) .withExistingPrimaryNetworkSubnet(network2, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withEphemeralOSDisk() .withPlacement(DiffDiskPlacement.CACHE_DISK) .create(); Assertions.assertTrue(flexVMSS.isEphemeralOSDisk()); VirtualMachine instance1 = this.computeManager .virtualMachines() .getById(flexVMSS.virtualMachines().list().stream().iterator().next().id()); Assertions.assertTrue(instance1.isOSDiskEphemeral()); final String vmName = generateRandomResourceName("vm", 10); VirtualMachine vm = this.computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network2) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withExistingVirtualMachineScaleSet(flexVMSS) .create(); Assertions.assertEquals(vm.virtualMachineScaleSetId(), flexVMSS.id()); Assertions.assertFalse(vm.isOSDiskEphemeral()); }
ResourceManagerUtils.sleep(Duration.ofMinutes(1));
public void canCreateVMSSWithEphemeralOSDisk() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); Network network = this.networkManager .networks() .define("vmssvnet") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet uniformVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_DS1_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withUpgradeMode(UpgradeMode.MANUAL) .withEphemeralOSDisk() .withPlacement(DiffDiskPlacement.CACHE_DISK) .withCapacity(1) .create(); Assertions.assertTrue(uniformVMSS.isEphemeralOSDisk()); ResourceManagerUtils.sleep(Duration.ofMinutes(1)); uniformVMSS.virtualMachines().updateInstances( uniformVMSS.virtualMachines() .list() .stream() .map(VirtualMachineScaleSetVM::instanceId) .toArray(String[]::new)); Assertions.assertTrue(uniformVMSS.virtualMachines().list().stream().allMatch(VirtualMachineScaleSetVM::isLatestScaleSetUpdateApplied)); uniformVMSS.update() .withCapacity(2) .apply(); ResourceManagerUtils.sleep(Duration.ofMinutes(1)); Assertions.assertTrue(uniformVMSS.virtualMachines().list().stream().allMatch(VirtualMachineScaleSetVM::isLatestScaleSetUpdateApplied)); Network network2 = this .networkManager .networks() .define("vmssvnet2") .withRegion(region) .withExistingResourceGroup(rgName) .withAddressSpace("10.1.0.0/16") .withSubnet("subnet1", "10.1.0.0/16") .create(); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, this.resourceManager.resourceGroups().getByName(rgName), "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); final 
String vmssName1 = generateRandomResourceName("vmss", 10); VirtualMachineScaleSet flexVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName1) .withRegion(region) .withNewResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_DS1_V2) .withExistingPrimaryNetworkSubnet(network2, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withEphemeralOSDisk() .withPlacement(DiffDiskPlacement.CACHE_DISK) .create(); Assertions.assertTrue(flexVMSS.isEphemeralOSDisk()); VirtualMachine instance1 = this.computeManager .virtualMachines() .getById(flexVMSS.virtualMachines().list().stream().iterator().next().id()); Assertions.assertTrue(instance1.isOSDiskEphemeral()); final String vmName = generateRandomResourceName("vm", 10); VirtualMachine vm = this.computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network2) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withExistingVirtualMachineScaleSet(flexVMSS) .create(); Assertions.assertEquals(vm.virtualMachineScaleSetId(), flexVMSS.id()); Assertions.assertFalse(vm.isOSDiskEphemeral()); }
class VirtualMachineScaleSetOperationsTests extends ComputeManagementTest { private String rgName = ""; private final Region region = Region.US_WEST; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test public void canCreateVMSSWithPlan() { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); final String uname = "jvuser"; Network network = this .networkManager .networks() .define(generateRandomResourceName("vmssvnet", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); PurchasePlan plan = new PurchasePlan() .withName("access_server_byol") .withPublisher("openvpn") .withProduct("openvpnas"); ImageReference imageReference = new ImageReference() .withPublisher("openvpn") .withOffer("openvpnas") .withSku("access_server_byol") .withVersion("latest"); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withSpecificLinuxImageVersion(imageReference) .withRootUsername(uname) .withSsh(sshPublicKey()) .withNewDataDisk(1) .withPlan(plan) .create(); VirtualMachineScaleSet currentVirtualMachineScaleSet = this.computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); Assertions.assertEquals("access_server_byol", currentVirtualMachineScaleSet.plan().name()); 
Assertions.assertEquals("openvpn", currentVirtualMachineScaleSet.plan().publisher()); Assertions.assertEquals("openvpnas", currentVirtualMachineScaleSet.plan().product()); } @Test public void canUpdateVirtualMachineScaleSetWithExtensionProtectedSettings() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String uname = "jvuser"; final String password = password(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); StorageAccount storageAccount = this .storageManager .storageAccounts() .define(generateRandomResourceName("stg", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .create(); List<StorageAccountKey> keys = storageAccount.getKeys(); Assertions.assertNotNull(keys); Assertions.assertTrue(keys.size() > 0); String storageAccountKey = keys.get(0).value(); Network network = this .networkManager .networks() .define(generateRandomResourceName("vmssvnet", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(uname) .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withExistingStorageAccount(storageAccount) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("commandToExecute", "ls") 
.withProtectedSetting("storageAccountName", storageAccount.name()) .withProtectedSetting("storageAccountKey", storageAccountKey) .attach() .create(); Map<String, VirtualMachineScaleSetExtension> extensions = virtualMachineScaleSet.extensions(); Assertions.assertNotNull(extensions); Assertions.assertTrue(extensions.size() > 0); Assertions.assertTrue(extensions.containsKey("CustomScriptForLinux")); VirtualMachineScaleSetExtension extension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(extension.publicSettings()); Assertions.assertEquals(1, extension.publicSettings().size()); Assertions.assertNotNull(extension.publicSettingsAsJsonString()); VirtualMachineScaleSet scaleSet = this.computeManager.virtualMachineScaleSets().getById(virtualMachineScaleSet.id()); extensions = scaleSet.extensions(); Assertions.assertNotNull(extensions); Assertions.assertTrue(extensions.size() > 0); Assertions.assertTrue(extensions.containsKey("CustomScriptForLinux")); extension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(extension.publicSettings()); Assertions.assertEquals(1, extension.publicSettings().size()); Assertions.assertNotNull(extension.publicSettingsAsJsonString()); int newCapacity = scaleSet.capacity() + 1; virtualMachineScaleSet.update().withCapacity(newCapacity).apply(); extensions = virtualMachineScaleSet.extensions(); Assertions.assertNotNull(extensions); Assertions.assertTrue(extensions.size() > 0); Assertions.assertTrue(extensions.containsKey("CustomScriptForLinux")); extension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(extension.publicSettings()); Assertions.assertEquals(1, extension.publicSettings().size()); Assertions.assertNotNull(extension.publicSettingsAsJsonString()); } @Test public void canCreateVirtualMachineScaleSetWithCustomScriptExtension() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String uname = "jvuser"; final String password = password(); 
ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define(generateRandomResourceName("vmssvnet", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1"); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(uname) .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("commandToExecute", "ls") .attach() .withUpgradeMode(UpgradeMode.MANUAL) .create(); checkVMInstances(virtualMachineScaleSet); List<String> publicIPAddressIds = virtualMachineScaleSet.primaryPublicIpAddressIds(); PublicIpAddress publicIPAddress = this.networkManager.publicIpAddresses().getById(publicIPAddressIds.get(0)); String fqdn = publicIPAddress.fqdn(); Assertions.assertNotNull(fqdn); for (VirtualMachineScaleSetVM vm : virtualMachineScaleSet.virtualMachines().list()) { PagedIterable<VirtualMachineScaleSetNetworkInterface> networkInterfaces = vm.listNetworkInterfaces(); Assertions.assertEquals(TestUtilities.getSize(networkInterfaces), 1); VirtualMachineScaleSetNetworkInterface 
networkInterface = networkInterfaces.iterator().next(); VirtualMachineScaleSetNicIpConfiguration primaryIpConfig = null; primaryIpConfig = networkInterface.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Integer sshFrontendPort = null; List<LoadBalancerInboundNatRule> natRules = primaryIpConfig.listAssociatedLoadBalancerInboundNatRules(); for (LoadBalancerInboundNatRule natRule : natRules) { if (natRule.backendPort() == 22) { sshFrontendPort = natRule.frontendPort(); break; } } Assertions.assertNotNull(sshFrontendPort); } } @Test public void canCreateVirtualMachineScaleSetWithOptionalNetworkSettings() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String vmssVmDnsLabel = generateRandomResourceName("pip", 10); final String nsgName = generateRandomResourceName("nsg", 10); final String asgName = generateRandomResourceName("asg", 8); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); ApplicationSecurityGroup asg = this .networkManager .applicationSecurityGroups() .define(asgName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .create(); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withVirtualMachinePublicIp(vmssVmDnsLabel) .withExistingApplicationSecurityGroup(asg) .create(); 
VirtualMachineScaleSetPublicIpAddressConfiguration currentIpConfig = virtualMachineScaleSet.virtualMachinePublicIpConfig(); Assertions.assertNotNull(currentIpConfig); Assertions.assertNotNull(currentIpConfig.dnsSettings()); Assertions.assertNotNull(currentIpConfig.dnsSettings().domainNameLabel()); currentIpConfig.withIdleTimeoutInMinutes(20); virtualMachineScaleSet.update().withVirtualMachinePublicIp(currentIpConfig).apply(); currentIpConfig = virtualMachineScaleSet.virtualMachinePublicIpConfig(); Assertions.assertNotNull(currentIpConfig); Assertions.assertNotNull(currentIpConfig.idleTimeoutInMinutes()); Assertions.assertEquals((long) 20, (long) currentIpConfig.idleTimeoutInMinutes()); virtualMachineScaleSet.refresh(); currentIpConfig = virtualMachineScaleSet.virtualMachinePublicIpConfig(); Assertions.assertNotNull(currentIpConfig); Assertions.assertNotNull(currentIpConfig.idleTimeoutInMinutes()); Assertions.assertEquals((long) 20, (long) currentIpConfig.idleTimeoutInMinutes()); List<String> asgIds = virtualMachineScaleSet.applicationSecurityGroupIds(); Assertions.assertNotNull(asgIds); Assertions.assertEquals(1, asgIds.size()); NetworkSecurityGroup nsg = networkManager .networkSecurityGroups() .define(nsgName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .defineRule("rule1") .allowOutbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); virtualMachineScaleSet.deallocate(); virtualMachineScaleSet .update() .withIpForwarding() .withAcceleratedNetworking() .withExistingNetworkSecurityGroup(nsg) .apply(); Assertions.assertTrue(virtualMachineScaleSet.isIpForwardingEnabled()); Assertions.assertTrue(virtualMachineScaleSet.isAcceleratedNetworkingEnabled()); Assertions.assertNotNull(virtualMachineScaleSet.networkSecurityGroupId()); virtualMachineScaleSet.refresh(); Assertions.assertTrue(virtualMachineScaleSet.isIpForwardingEnabled()); 
Assertions.assertTrue(virtualMachineScaleSet.isAcceleratedNetworkingEnabled()); Assertions.assertNotNull(virtualMachineScaleSet.networkSecurityGroupId()); virtualMachineScaleSet .update() .withoutIpForwarding() .withoutAcceleratedNetworking() .withoutNetworkSecurityGroup() .apply(); Assertions.assertFalse(virtualMachineScaleSet.isIpForwardingEnabled()); Assertions.assertFalse(virtualMachineScaleSet.isAcceleratedNetworkingEnabled()); Assertions.assertNull(virtualMachineScaleSet.networkSecurityGroupId()); } @Test @Disabled("Mock framework doesn't support data plane") public void canCreateVirtualMachineScaleSetWithSecret() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String vaultName = generateRandomResourceName("vlt", 10); final String secretName = generateRandomResourceName("srt", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); Vault vault = this .keyVaultManager .vaults() .define(vaultName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .defineAccessPolicy() .forServicePrincipal(clientIdFromFile()) .allowSecretAllPermissions() .attach() .withDeploymentEnabled() .create(); final InputStream embeddedJsonConfig = this.getClass().getResourceAsStream("/myTest.txt"); String secretValue = IOUtils.toString(embeddedJsonConfig, StandardCharsets.UTF_8); Secret secret = vault.secrets().define(secretName).withValue(secretValue).create(); 
List<VaultCertificate> certs = new ArrayList<>(); certs.add(new VaultCertificate().withCertificateUrl(secret.id())); List<VaultSecretGroup> group = new ArrayList<>(); group .add( new VaultSecretGroup() .withSourceVault(new SubResource().withId(vault.id())) .withVaultCertificates(certs)); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSecrets(group) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg3", 15)) .withUpgradeMode(UpgradeMode.MANUAL) .create(); for (VirtualMachineScaleSetVM vm : virtualMachineScaleSet.virtualMachines().list()) { Assertions.assertTrue(vm.osProfile().secrets().size() > 0); } virtualMachineScaleSet.update().withoutSecrets().apply(); for (VirtualMachineScaleSetVM vm : virtualMachineScaleSet.virtualMachines().list()) { Assertions.assertTrue(vm.osProfile().secrets().size() == 0); } } @Test public void canCreateVirtualMachineScaleSet() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", 
LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .create(); Assertions.assertNull(virtualMachineScaleSet.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerBackends().size() == 2); Assertions .assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().size() == 2); Network primaryNetwork = virtualMachineScaleSet.getPrimaryNetwork(); Assertions.assertNotNull(primaryNetwork.id()); PagedIterable<VirtualMachineScaleSetNetworkInterface> nics = virtualMachineScaleSet.listNetworkInterfaces(); int nicCount = 0; for (VirtualMachineScaleSetNetworkInterface nic : nics) { nicCount++; Assertions.assertNotNull(nic.id()); Assertions 
.assertTrue(nic.virtualMachineId().toLowerCase().startsWith(virtualMachineScaleSet.id().toLowerCase())); Assertions.assertNotNull(nic.macAddress()); Assertions.assertNotNull(nic.dnsServers()); Assertions.assertNotNull(nic.appliedDnsServers()); Map<String, VirtualMachineScaleSetNicIpConfiguration> ipConfigs = nic.ipConfigurations(); Assertions.assertEquals(ipConfigs.size(), 1); for (Map.Entry<String, VirtualMachineScaleSetNicIpConfiguration> entry : ipConfigs.entrySet()) { VirtualMachineScaleSetNicIpConfiguration ipConfig = entry.getValue(); Assertions.assertNotNull(ipConfig); Assertions.assertTrue(ipConfig.isPrimary()); Assertions.assertNotNull(ipConfig.subnetName()); Assertions.assertTrue(primaryNetwork.id().toLowerCase().equalsIgnoreCase(ipConfig.networkId())); Assertions.assertNotNull(ipConfig.privateIpAddress()); Assertions.assertNotNull(ipConfig.privateIpAddressVersion()); Assertions.assertNotNull(ipConfig.privateIpAllocationMethod()); List<LoadBalancerBackend> lbBackends = ipConfig.listAssociatedLoadBalancerBackends(); Assertions.assertEquals(lbBackends.size(), 2); for (LoadBalancerBackend lbBackend : lbBackends) { Map<String, LoadBalancingRule> lbRules = lbBackend.loadBalancingRules(); Assertions.assertEquals(lbRules.size(), 1); for (Map.Entry<String, LoadBalancingRule> ruleEntry : lbRules.entrySet()) { LoadBalancingRule rule = ruleEntry.getValue(); Assertions.assertNotNull(rule); Assertions .assertTrue( (rule.frontendPort() == 80 && rule.backendPort() == 80) || (rule.frontendPort() == 443 && rule.backendPort() == 443)); } } List<LoadBalancerInboundNatRule> lbNatRules = ipConfig.listAssociatedLoadBalancerInboundNatRules(); Assertions.assertEquals(lbNatRules.size(), 2); for (LoadBalancerInboundNatRule lbNatRule : lbNatRules) { Assertions .assertTrue( (lbNatRule.frontendPort() >= 5000 && lbNatRule.frontendPort() <= 5099) || (lbNatRule.frontendPort() >= 6000 && lbNatRule.frontendPort() <= 6099)); Assertions.assertTrue(lbNatRule.backendPort() == 22 || 
lbNatRule.backendPort() == 23); } } } Assertions.assertTrue(nicCount > 0); Assertions.assertEquals(virtualMachineScaleSet.vhdContainers().size(), 2); Assertions.assertEquals(virtualMachineScaleSet.sku(), VirtualMachineScaleSetSkuTypes.STANDARD_A0); Assertions.assertTrue(virtualMachineScaleSet.upgradeModel() == UpgradeMode.AUTOMATIC); Assertions.assertEquals(virtualMachineScaleSet.capacity(), 2); primaryNetwork = virtualMachineScaleSet.getPrimaryNetwork(); String inboundNatPoolToRemove = null; for (String inboundNatPoolName : virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().keySet()) { inboundNatPoolToRemove = inboundNatPoolName; break; } LoadBalancer internalLoadBalancer = createInternalLoadBalancer(region, resourceGroup, primaryNetwork, "1"); virtualMachineScaleSet .update() .withExistingPrimaryInternalLoadBalancer(internalLoadBalancer) .withoutPrimaryInternetFacingLoadBalancerNatPools(inboundNatPoolToRemove) .apply(); virtualMachineScaleSet = this.computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerBackends().size() == 2); Assertions .assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().size() == 1); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerBackends().size() == 2); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerInboundNatPools().size() == 2); nics = virtualMachineScaleSet.listNetworkInterfaces(); nicCount = 0; for (VirtualMachineScaleSetNetworkInterface nic : nics) { nicCount++; Map<String, VirtualMachineScaleSetNicIpConfiguration> ipConfigs = nic.ipConfigurations(); Assertions.assertEquals(ipConfigs.size(), 1); for (Map.Entry<String, 
VirtualMachineScaleSetNicIpConfiguration> entry : ipConfigs.entrySet()) { VirtualMachineScaleSetNicIpConfiguration ipConfig = entry.getValue(); Assertions.assertNotNull(ipConfig); List<LoadBalancerBackend> lbBackends = ipConfig.listAssociatedLoadBalancerBackends(); Assertions.assertNotNull(lbBackends); for (LoadBalancerBackend lbBackend : lbBackends) { Map<String, LoadBalancingRule> lbRules = lbBackend.loadBalancingRules(); Assertions.assertEquals(lbRules.size(), 1); for (Map.Entry<String, LoadBalancingRule> ruleEntry : lbRules.entrySet()) { LoadBalancingRule rule = ruleEntry.getValue(); Assertions.assertNotNull(rule); Assertions .assertTrue( (rule.frontendPort() == 80 && rule.backendPort() == 80) || (rule.frontendPort() == 443 && rule.backendPort() == 443) || (rule.frontendPort() == 1000 && rule.backendPort() == 1000) || (rule.frontendPort() == 1001 && rule.backendPort() == 1001)); } } List<LoadBalancerInboundNatRule> lbNatRules = ipConfig.listAssociatedLoadBalancerInboundNatRules(); for (LoadBalancerInboundNatRule lbNatRule : lbNatRules) { Assertions .assertTrue( (lbNatRule.frontendPort() >= 6000 && lbNatRule.frontendPort() <= 6099) || (lbNatRule.frontendPort() >= 5000 && lbNatRule.frontendPort() <= 5099) || (lbNatRule.frontendPort() >= 8000 && lbNatRule.frontendPort() <= 8099) || (lbNatRule.frontendPort() >= 9000 && lbNatRule.frontendPort() <= 9099)); Assertions .assertTrue( lbNatRule.backendPort() == 23 || lbNatRule.backendPort() == 22 || lbNatRule.backendPort() == 44 || lbNatRule.backendPort() == 45); } } } Assertions.assertTrue(nicCount > 0); } /* * Previously name * canCreateTwoRegionalVirtualMachineScaleSetsAndAssociateEachWithDifferentBackendPoolOfZoneResilientLoadBalancer * but this was too long for some OSes and would cause git checkout to fail. 
*/ @Test public void canCreateTwoRegionalVMScaleSetsWithDifferentPoolOfZoneResilientLoadBalancer() throws Exception { Region region2 = Region.US_EAST2; ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region2).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region2, resourceGroup, "1", LoadBalancerSkuType.STANDARD); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); List<String> natpools = new ArrayList<>(); for (String natPool : publicLoadBalancer.inboundNatPools().keySet()) { natpools.add(natPool); } Assertions.assertTrue(natpools.size() == 2); final String vmssName1 = generateRandomResourceName("vmss1", 10); VirtualMachineScaleSet virtualMachineScaleSet1 = this .computeManager .virtualMachineScaleSets() .define(vmssName1) .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0)) .withPrimaryInternetFacingLoadBalancerInboundNatPools(natpools.get(0)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); final String vmssName2 = generateRandomResourceName("vmss2", 10); VirtualMachineScaleSet virtualMachineScaleSet2 = this .computeManager .virtualMachineScaleSets() .define(vmssName2) .withRegion(region2) .withExistingResourceGroup(resourceGroup) 
.withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(1)) .withPrimaryInternetFacingLoadBalancerInboundNatPools(natpools.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Assertions.assertNull(virtualMachineScaleSet1.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet1.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet1.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet1.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet1.listPrimaryInternetFacingLoadBalancerBackends().size() == 1); Assertions.assertNull(virtualMachineScaleSet2.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet2.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet2.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet2.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet2.listPrimaryInternetFacingLoadBalancerBackends().size() == 1); } @Test public void canCreateZoneRedundantVirtualMachineScaleSetWithZoneResilientLoadBalancer() throws Exception { Region region2 = Region.US_EAST2; ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region2).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = 
createInternetFacingLoadBalancer(region2, resourceGroup, "1", LoadBalancerSkuType.STANDARD); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); final String vmssName = generateRandomResourceName("vmss", 10); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withAvailabilityZone(AvailabilityZoneId.ZONE_1) .withAvailabilityZone(AvailabilityZoneId.ZONE_2) .create(); Assertions.assertNotNull(virtualMachineScaleSet.availabilityZones()); Assertions.assertEquals(2, virtualMachineScaleSet.availabilityZones().size()); Assertions.assertNull(virtualMachineScaleSet.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerBackends().size() == 2); Assertions .assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().size() == 2); Network primaryNetwork = virtualMachineScaleSet.getPrimaryNetwork(); Assertions.assertNotNull(primaryNetwork.id()); } @Test public void canEnableMSIOnVirtualMachineScaleSetWithoutRoleAssignment() throws 
Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSystemAssignedManagedServiceIdentity() .create(); PagedIterable<RoleAssignment> rgRoleAssignments = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments); boolean found = false; for (RoleAssignment roleAssignment : rgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachineScaleSet.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertFalse( found, "Resource group should not have a role assignment with virtual machine scale set MSI principal"); } @Test public void 
canEnableMSIOnVirtualMachineScaleSetWithMultipleRoleAssignment() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); StorageAccount storageAccount = this .storageManager .storageAccounts() .define(generateRandomResourceName("jvcsrg", 10)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .create(); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR) .withSystemAssignedIdentityBasedAccessTo(storageAccount.id(), BuiltInRole.CONTRIBUTOR) .create(); Assertions.assertNotNull(virtualMachineScaleSet.managedServiceIdentityType()); Assertions .assertTrue( virtualMachineScaleSet.managedServiceIdentityType().equals(ResourceIdentityType.SYSTEM_ASSIGNED)); 
PagedIterable<RoleAssignment> rgRoleAssignments = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments); boolean found = false; for (RoleAssignment roleAssignment : rgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachineScaleSet.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertTrue( found, "Resource group should have a role assignment with virtual machine scale set MSI principal"); PagedIterable<RoleAssignment> stgRoleAssignments = authorizationManager.roleAssignments().listByScope(storageAccount.id()); Assertions.assertNotNull(stgRoleAssignments); found = false; for (RoleAssignment roleAssignment : stgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachineScaleSet.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertTrue( found, "Storage account should have a role assignment with virtual machine scale set MSI principal"); } @Test public void canGetSingleVMSSInstance() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) 
// Continuation of canGetSingleVMSSInstance: finish the VMSS definition, then fetch the first VM
// instance via list(), getInstance(id) and getInstanceAsync(id), asserting all three agree.
.withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg3", 15)) .withUpgradeMode(UpgradeMode.MANUAL) .create(); VirtualMachineScaleSet virtualMachineScaleSet = this.computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); VirtualMachineScaleSetVMs virtualMachineScaleSetVMs = virtualMachineScaleSet.virtualMachines(); VirtualMachineScaleSetVM firstVm = virtualMachineScaleSetVMs.list().iterator().next(); VirtualMachineScaleSetVM fetchedVm = virtualMachineScaleSetVMs.getInstance(firstVm.instanceId()); this.checkVmsEqual(firstVm, fetchedVm); VirtualMachineScaleSetVM fetchedAsyncVm = virtualMachineScaleSetVMs.getInstanceAsync(firstVm.instanceId()).block(); this.checkVmsEqual(firstVm, fetchedAsyncVm); } @Test public void canCreateLowPriorityVMSSInstance() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.STANDARD); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); }
// canCreateLowPriorityVMSSInstance: creates a LOW-priority VMSS with DEALLOCATE eviction policy
// and maxPrice -1.0 (the "pay up to on-demand price" sentinel), verifies priority/eviction/billing
// fields round-trip, then updates maxPrice to 2000.0 and re-checks.
// Then: canListInstancesIncludingInstanceView — creates a 3-instance MANUAL-upgrade VMSS to test
// list(..., INSTANCE_VIEW) power-state expansion.
Assertions.assertTrue(backends.size() == 2); VirtualMachineScaleSet vmss = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg3", 15)) .withUpgradeMode(UpgradeMode.MANUAL) .withLowPriorityVirtualMachine(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(-1.0) .create(); Assertions.assertEquals(vmss.virtualMachinePriority(), VirtualMachinePriorityTypes.LOW); Assertions.assertEquals(vmss.virtualMachineEvictionPolicy(), VirtualMachineEvictionPolicyTypes.DEALLOCATE); Assertions.assertEquals(vmss.billingProfile().maxPrice(), (Double) (-1.0)); vmss.update().withMaxPrice(2000.0).apply(); Assertions.assertEquals(vmss.billingProfile().maxPrice(), (Double) 2000.0); } @Test public void canListInstancesIncludingInstanceView() { final String vmssName = generateRandomResourceName("vmss", 10); Network network = this.networkManager .networks() .define("vmssvnet") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); this.computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer()
// Continuation of canListInstancesIncludingInstanceView: all 3 instances must report RUNNING via
// the INSTANCE_VIEW expansion; after deallocating the first instance through the inner client and
// refreshing, its power state must flip to DEALLOCATED while the others stay RUNNING.
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withUpgradeMode(UpgradeMode.MANUAL) .withCapacity(3) .create(); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); List<VirtualMachineScaleSetVM> vmInstances = vmss.virtualMachines().list(null, VirtualMachineScaleSetVMExpandType.INSTANCE_VIEW).stream().collect(Collectors.toList()); Assertions.assertEquals(3, vmInstances.size()); List<PowerState> powerStates = vmInstances.stream().map(VirtualMachineScaleSetVM::powerState).collect(Collectors.toList()); Assertions.assertEquals(Arrays.asList(PowerState.RUNNING, PowerState.RUNNING, PowerState.RUNNING), powerStates); String firstInstanceId = vmInstances.get(0).instanceId(); computeManager.serviceClient().getVirtualMachineScaleSetVMs().deallocate(rgName, vmssName, firstInstanceId); vmInstances.get(0).refresh(); powerStates = vmInstances.stream().map(VirtualMachineScaleSetVM::powerState).collect(Collectors.toList()); Assertions.assertEquals(Arrays.asList(PowerState.DEALLOCATED, PowerState.RUNNING, PowerState.RUNNING), powerStates); VirtualMachineScaleSetVM vmInstance0 = vmss.virtualMachines().getInstance(firstInstanceId); Assertions.assertEquals(PowerState.DEALLOCATED, vmInstance0.powerState()); } @Test @Disabled("no longer works") public void canPerformSimulateEvictionOnSpotVMSSInstance() { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups() .define(rgName) .withRegion(region) .create(); Network network = this.networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName)
// canPerformSimulateEvictionOnSpotVMSSInstance (@Disabled): creates a Spot VMSS with DEALLOCATE
// eviction, calls simulateEviction on every instance, then polls up to ~30 minutes (5-minute
// intervals) until all instances report DEALLOCATED; finally expects osDiskSizeInGB == 0 on the
// evicted instances. Then: checkVmsEqual — field-by-field comparison helper for two
// VirtualMachineScaleSetVM views of the same instance.
// NOTE(review): in checkVmsEqual below, the isWindowsVMAgentProvisioned assertion compares
// `original` to itself instead of to `fetched` — see fix proposed on the following chunk.
.withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSpotPriorityVirtualMachine(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .create(); PagedIterable<VirtualMachineScaleSetVM> vmInstances = vmss.virtualMachines().list(); for (VirtualMachineScaleSetVM instance : vmInstances) { Assertions.assertTrue(instance.osDiskSizeInGB() > 0); vmss.virtualMachines().simulateEviction(instance.instanceId()); } boolean deallocated = false; int pollIntervalInMinutes = 5; for (int i = 0; i < 30; i += pollIntervalInMinutes) { ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes)); deallocated = true; for (VirtualMachineScaleSetVM instance : vmInstances) { instance.refresh(); if (instance.powerState() != PowerState.DEALLOCATED) { deallocated = false; } } if (deallocated) { break; } } Assertions.assertTrue(deallocated); for (VirtualMachineScaleSetVM instance : vmInstances) { instance.refresh(); Assertions.assertEquals(0, instance.osDiskSizeInGB()); } } private void checkVmsEqual(VirtualMachineScaleSetVM original, VirtualMachineScaleSetVM fetched) { Assertions.assertEquals(original.administratorUserName(), fetched.administratorUserName()); Assertions.assertEquals(original.availabilitySetId(), fetched.availabilitySetId()); Assertions.assertEquals(original.bootDiagnosticEnabled(), fetched.bootDiagnosticEnabled()); Assertions.assertEquals(original.bootDiagnosticStorageAccountUri(), fetched.bootDiagnosticStorageAccountUri()); Assertions.assertEquals(original.computerName(), fetched.computerName()); Assertions.assertEquals(original.dataDisks().size(), fetched.dataDisks().size()); Assertions.assertEquals(original.extensions().size(), fetched.extensions().size()); Assertions.assertEquals(original.instanceId(),
// Continuation of checkVmsEqual. FIX 1: the isWindowsVMAgentProvisioned assertion previously
// compared `original` against itself (always true); it now compares against `fetched` like every
// other field in this helper.
fetched.instanceId()); Assertions.assertEquals(original.isLatestScaleSetUpdateApplied(), fetched.isLatestScaleSetUpdateApplied()); Assertions.assertEquals(original.isLinuxPasswordAuthenticationEnabled(), fetched.isLinuxPasswordAuthenticationEnabled()); Assertions.assertEquals(original.isManagedDiskEnabled(), fetched.isManagedDiskEnabled()); Assertions.assertEquals(original.isOSBasedOnCustomImage(), fetched.isOSBasedOnCustomImage()); Assertions.assertEquals(original.isOSBasedOnPlatformImage(), fetched.isOSBasedOnPlatformImage()); Assertions.assertEquals(original.isOSBasedOnStoredImage(), fetched.isOSBasedOnStoredImage()); Assertions.assertEquals(original.isWindowsAutoUpdateEnabled(), fetched.isWindowsAutoUpdateEnabled()); Assertions.assertEquals(original.isWindowsVMAgentProvisioned(), fetched.isWindowsVMAgentProvisioned()); Assertions.assertEquals(original.networkInterfaceIds().size(), fetched.networkInterfaceIds().size()); Assertions.assertEquals(original.osDiskCachingType(), fetched.osDiskCachingType()); Assertions.assertEquals(original.osDiskId(), fetched.osDiskId()); Assertions.assertEquals(original.osDiskName(), fetched.osDiskName()); Assertions.assertEquals(original.osDiskSizeInGB(), fetched.osDiskSizeInGB()); Assertions.assertEquals(original.osType(), fetched.osType()); Assertions.assertEquals(original.osUnmanagedDiskVhdUri(), fetched.osUnmanagedDiskVhdUri()); Assertions.assertEquals(original.powerState(), fetched.powerState()); Assertions.assertEquals(original.primaryNetworkInterfaceId(), fetched.primaryNetworkInterfaceId()); Assertions.assertEquals(original.size(), fetched.size()); Assertions.assertEquals(original.sku().name(), fetched.sku().name()); Assertions.assertEquals(original.storedImageUnmanagedVhdUri(), fetched.storedImageUnmanagedVhdUri()); Assertions.assertEquals(original.unmanagedDataDisks().size(), fetched.unmanagedDataDisks().size()); Assertions.assertEquals(original.windowsTimeZone(), fetched.windowsTimeZone()); } private void
// checkVMInstances: validates every VM of a (Linux, unmanaged-disk) scale set — capacity matches
// the instance count, per-VM OS/extension/power-state properties hold, and restart/powerOff/start
// lifecycle transitions are observable via refreshInstanceView(). FIX 2: the computerName-prefix
// check previously used assertNotNull on a boolean result (boxed Boolean is never null, so the
// assertion could never fail); it is now assertTrue so a wrong prefix actually fails the test.
checkVMInstances(VirtualMachineScaleSet vmScaleSet) { VirtualMachineScaleSetVMs virtualMachineScaleSetVMs = vmScaleSet.virtualMachines(); PagedIterable<VirtualMachineScaleSetVM> virtualMachines = virtualMachineScaleSetVMs.list(); Assertions.assertEquals(TestUtilities.getSize(virtualMachines), vmScaleSet.capacity()); Assertions.assertTrue(TestUtilities.getSize(virtualMachines) > 0); virtualMachineScaleSetVMs.updateInstances(virtualMachines.iterator().next().instanceId()); for (VirtualMachineScaleSetVM vm : virtualMachines) { Assertions.assertNotNull(vm.size()); Assertions.assertEquals(vm.osType(), OperatingSystemTypes.LINUX); Assertions.assertTrue(vm.computerName().startsWith(vmScaleSet.computerNamePrefix())); Assertions.assertTrue(vm.isOSBasedOnPlatformImage()); Assertions.assertNull(vm.osDiskId()); Assertions.assertNotNull(vm.osUnmanagedDiskVhdUri()); Assertions.assertNull(vm.storedImageUnmanagedVhdUri()); Assertions.assertFalse(vm.isWindowsAutoUpdateEnabled()); Assertions.assertFalse(vm.isWindowsVMAgentProvisioned()); Assertions.assertTrue(vm.administratorUserName().equalsIgnoreCase("jvuser")); VirtualMachineImage vmImage = vm.getOSPlatformImage(); Assertions.assertNotNull(vmImage); Assertions.assertEquals(vm.extensions().size(), vmScaleSet.extensions().size()); Assertions.assertNotNull(vm.powerState()); vm.refreshInstanceView(); } VirtualMachineScaleSetVM virtualMachineScaleSetVM = virtualMachines.iterator().next(); Assertions.assertNotNull(virtualMachineScaleSetVM); virtualMachineScaleSetVM.restart(); virtualMachineScaleSetVM.powerOff(); virtualMachineScaleSetVM.refreshInstanceView(); Assertions.assertEquals(virtualMachineScaleSetVM.powerState(), PowerState.STOPPED); virtualMachineScaleSetVM.start(); for (VirtualMachineScaleSetVM vm : virtualMachines) { PagedIterable<VirtualMachineScaleSetNetworkInterface> nics = vmScaleSet.listNetworkInterfacesByInstanceId(vm.instanceId()); Assertions.assertNotNull(nics); Assertions.assertEquals(TestUtilities.getSize(nics),
// Each VM must expose exactly one NIC, resolvable both through the scale set and through the VM.
// FIX 3: dropped the redundant (and locale-sensitive) toLowerCase() before equalsIgnoreCase —
// equalsIgnoreCase already ignores case. Then: testVirtualMachineScaleSetSkuTypes — asserts
// VirtualMachineScaleSetSkuTypes.sku() returns a fresh defensive copy so mutating one copy's
// capacity does not leak into the shared constant or other copies. Then: head of
// canDeleteVMSSInstance (eastus2euap region) — continued on the next chunk.
1); VirtualMachineScaleSetNetworkInterface nic = nics.iterator().next(); Assertions.assertNotNull(nic.virtualMachineId()); Assertions.assertTrue(nic.virtualMachineId().equalsIgnoreCase(vm.id())); Assertions.assertNotNull(vm.listNetworkInterfaces()); VirtualMachineScaleSetNetworkInterface nicA = vmScaleSet.getNetworkInterfaceByInstanceId(vm.instanceId(), nic.name()); Assertions.assertNotNull(nicA); VirtualMachineScaleSetNetworkInterface nicB = vm.getNetworkInterface(nic.name()); String nicIdB = vm.getNetworkInterfaceAsync(nic.name()).map(n -> nic.primaryIPConfiguration().networkId()).block(); Assertions.assertNotNull(nicB); Assertions.assertNotNull(nicIdB); } } @Test public void testVirtualMachineScaleSetSkuTypes() { rgName = null; VirtualMachineScaleSetSkuTypes skuType = VirtualMachineScaleSetSkuTypes.STANDARD_A0; Assertions.assertNull(skuType.sku().capacity()); Sku sku1 = skuType.sku(); Assertions.assertNull(sku1.capacity()); sku1.withCapacity(1L); Assertions.assertEquals(sku1.capacity().longValue(), 1); Assertions.assertNull(skuType.sku().capacity()); Sku sku2 = skuType.sku(); Assertions.assertNull(sku2.capacity()); sku2.withCapacity(2L); Assertions.assertEquals(sku2.capacity().longValue(), 2); Assertions.assertNull(skuType.sku().capacity()); Assertions.assertEquals(sku1.capacity().longValue(), 1); } @Test public void canDeleteVMSSInstance() throws Exception { String euapRegion = "eastus2euap"; final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName) .withRegion(euapRegion) .create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet vmss = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup)
// Continuation of canDeleteVMSSInstance: from a 4-instance VMSS, delete two instances with
// forceDeletion=true, one with forceDeletion=false via the scale set, and the last via the
// collection-level deleteInstances — verifying the instance count after each step.
.withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(4) .create(); Assertions.assertEquals(4, vmss.virtualMachines().list().stream().count()); List<String> firstTwoIds = vmss.virtualMachines().list().stream() .limit(2) .map(VirtualMachineScaleSetVM::instanceId) .collect(Collectors.toList()); vmss.virtualMachines().deleteInstances(firstTwoIds, true); Assertions.assertEquals(2, vmss.virtualMachines().list().stream().count()); vmss.virtualMachines().deleteInstances(Collections.singleton(vmss.virtualMachines().list().stream().findFirst().get().instanceId()), false); Assertions.assertEquals(1, vmss.virtualMachines().list().stream().count()); computeManager.virtualMachineScaleSets().deleteInstances(rgName, vmssName, Collections.singleton(vmss.virtualMachines().list().stream().findFirst().get().instanceId()), false); } @Test public void canCreateFlexibleVMSS() throws Exception { VirtualMachineScaleSetInner options = new VirtualMachineScaleSetInner(); options.withOrchestrationMode(OrchestrationMode.FLEXIBLE) .withPlatformFaultDomainCount(1) .withLocation(region.name()); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName) .withRegion(region.name()) .create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region.name()) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); final String vmssName = generateRandomResourceName("vmss", 10); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet vmss = this .computeManager .virtualMachineScaleSets()
// canCreateFlexibleVMSS (continued): a FLEXIBLE-orchestration VMSS with a STANDARD internet-facing
// load balancer must expose a non-null virtualMachineProfile, load balancer and primary network.
// NOTE(review): the local `options` inner object built above appears unused by the fluent
// definition — possibly leftover from an earlier version; confirm before removing.
// Then: canUpdateVMSSInCreateOrUpdateMode — first creates a profile-less FLEXIBLE VMSS and
// asserts all profile-derived accessors degrade to null/empty/false instead of throwing.
.define(vmssName) .withRegion(region.name()) .withExistingResourceGroup(resourceGroup) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.CENTOS_8_3) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Assertions.assertNotNull(vmss.innerModel().virtualMachineProfile()); Assertions.assertNotNull(vmss.getPrimaryInternetFacingLoadBalancer()); Assertions.assertNotNull(vmss.getPrimaryNetwork()); Assertions.assertEquals(vmss.orchestrationMode(), OrchestrationMode.FLEXIBLE); } @Test public void canUpdateVMSSInCreateOrUpdateMode() throws Exception { String euapRegion = "eastus2euap"; final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName) .withRegion(euapRegion) .create(); VirtualMachineScaleSet vmss = this.computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withFlexibleOrchestrationMode() .create(); Assertions.assertEquals(vmss.orchestrationMode(), OrchestrationMode.FLEXIBLE); Assertions.assertNull(vmss.innerModel().virtualMachineProfile()); Assertions.assertNull(vmss.getPrimaryNetwork()); Assertions.assertNull(vmss.storageProfile()); Assertions.assertNull(vmss.networkProfile()); Assertions.assertNull(vmss.virtualMachinePublicIpConfig()); Assertions.assertEquals(vmss.applicationGatewayBackendAddressPoolsIds().size(), 0); Assertions.assertEquals(vmss.applicationSecurityGroupIds().size(), 0); Assertions.assertNull(vmss.billingProfile()); Assertions.assertNull(vmss.bootDiagnosticsStorageUri()); Assertions.assertNull(vmss.getPrimaryInternalLoadBalancer()); Assertions.assertEquals(vmss.vhdContainers().size(), 0);
// Continuation of canUpdateVMSSInCreateOrUpdateMode: remaining null/empty/false checks on the
// profile-less VMSS, a tags-only update, then a second define(...).create() on the SAME name
// (create-or-update semantics) that attaches a full VM profile, network, load balancer and
// per-VM public IP.
Assertions.assertEquals(vmss.listPrimaryInternalLoadBalancerBackends().size(), 0); Assertions.assertEquals(vmss.listPrimaryInternalLoadBalancerInboundNatPools().size(), 0); Assertions.assertEquals(vmss.listPrimaryInternetFacingLoadBalancerBackends().size(), 0); Assertions.assertEquals(vmss.listPrimaryInternetFacingLoadBalancerInboundNatPools().size(), 0); Assertions.assertEquals(vmss.primaryPublicIpAddressIds().size(), 0); Assertions.assertFalse(vmss.isAcceleratedNetworkingEnabled()); Assertions.assertFalse(vmss.isBootDiagnosticsEnabled()); Assertions.assertFalse(vmss.isIpForwardingEnabled()); Assertions.assertNull(vmss.networkSecurityGroupId()); Assertions.assertFalse(vmss.isManagedDiskEnabled()); vmss.update() .withTag("tag1", "value1") .apply(); Assertions.assertNotNull(vmss.tags()); Assertions.assertEquals(vmss.tags().get("tag1"), "value1"); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(Region.fromName(euapRegion), resourceGroup, "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); final String vmssVmDnsLabel = generateRandomResourceName("pip", 10); vmss = this.computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(1) .withVirtualMachinePublicIp(vmssVmDnsLabel) .create(); Assertions.assertNotNull(vmss.innerModel().virtualMachineProfile());
// After the second create: the re-created VMSS keeps FLEXIBLE mode and now exposes LB/network;
// a subsequent tags-only update must not wipe the VM profile. Then: head of canGetOrchestrationType
// — a plain (default) VMSS should report UNIFORM orchestration; continued on the next chunk.
Assertions.assertEquals(vmss.orchestrationMode(), OrchestrationMode.FLEXIBLE); Assertions.assertNotNull(vmss.getPrimaryInternetFacingLoadBalancer()); Assertions.assertNotNull(vmss.getPrimaryNetwork()); vmss = this.computeManager .virtualMachineScaleSets() .getById(vmss.id()); Assertions.assertNotNull(vmss); vmss.update() .withTag("tag1", "value2") .apply(); Assertions.assertNotNull(vmss.innerModel().virtualMachineProfile()); Assertions.assertNotNull(vmss.tags()); Assertions.assertEquals(vmss.tags().get("tag1"), "value2"); Assertions.assertNotNull(vmss.getPrimaryNetwork()); Assertions.assertNotNull(vmss.storageProfile()); Assertions.assertNotNull(vmss.networkProfile()); Assertions.assertNotNull(vmss.virtualMachinePublicIpConfig()); Assertions.assertNotEquals(vmss.listPrimaryInternetFacingLoadBalancerBackends().size(), 0); Assertions.assertEquals(vmss.listPrimaryInternetFacingLoadBalancerInboundNatPools().size(), 0); Assertions.assertNotEquals(vmss.primaryPublicIpAddressIds().size(), 0); } @Test public void canGetOrchestrationType() { String euapRegion = "eastus2euap"; final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName) .withRegion(euapRegion) .create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet vmss = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(1) .create();
// Continuation of canGetOrchestrationType: default creation yields UNIFORM, explicit
// withFlexibleOrchestrationMode() yields FLEXIBLE. Then: npeProtectionTest — reflectively invokes
// every zero-arg getter on a profile-less FLEXIBLE VMSS (excluding action methods like start/
// deallocate) to prove no accessor throws NPE. Then: head of canBatchOperateVMSSInstance.
Assertions.assertEquals(vmss.orchestrationMode(), OrchestrationMode.UNIFORM); final String vmssName2 = generateRandomResourceName("vmss", 10); VirtualMachineScaleSet vmss2 = this .computeManager .virtualMachineScaleSets() .define(vmssName2) .withRegion(euapRegion) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .create(); Assertions.assertNotNull(vmss2); Assertions.assertEquals(vmss2.orchestrationMode(), OrchestrationMode.FLEXIBLE); } @Test public void npeProtectionTest() throws Exception { String euapRegion = "eastus2euap"; final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName) .withRegion(euapRegion) .create(); VirtualMachineScaleSet vmss = this.computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withFlexibleOrchestrationMode() .create(); String excludeMethodsString = "start,startAsync,reimage,reimageAsync,deallocate,deallocateAsync,powerOff,powerOffAsync,restart,restartAsync"; Set<String> excludeMethods = new HashSet<>(Arrays.asList(excludeMethodsString.split(","))); Set<String> invoked = new HashSet<>(); for (Method method : VirtualMachineScaleSet.class.getDeclaredMethods()) { if (!excludeMethods.contains(method.getName()) && method.getParameterCount() == 0) { method.invoke(vmss); invoked.add(method.getName()); } } Assertions.assertTrue(invoked.contains("isEphemeralOSDisk")); Assertions.assertFalse(invoked.contains("start")); } @Test public void canBatchOperateVMSSInstance() { final String vmssName = generateRandomResourceName("vmss", 10); Network network = this.networkManager .networks() .define("vmssvnet") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet vmss = this.computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region)
// canBatchOperateVMSSInstance (continued): on a 3-instance AUTOMATIC-upgrade VMSS, exercises the
// batch instance operations (redeploy, powerOff with skipShutdown=true, start, restart,
// deallocate) against instances 0 and 2, checking instance 2's power state after each step and
// that untouched instance 1 stays RUNNING throughout.
.withExistingResourceGroup(rgName) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withUpgradeMode(UpgradeMode.AUTOMATIC) .withCapacity(3) .create(); List<VirtualMachineScaleSetVM> instances = vmss.virtualMachines().list().stream().collect(Collectors.toList()); Assertions.assertEquals(3, instances.size()); Collection<String> instanceIds = Arrays.asList(instances.get(0).instanceId(), instances.get(2).instanceId()); VirtualMachineScaleSetVMs vmInstances = vmss.virtualMachines(); VirtualMachineScaleSetVM vmInstance2 = vmss.virtualMachines().getInstance(instances.get(2).instanceId()); vmInstances.redeployInstances(instanceIds); vmInstances.powerOffInstances(instanceIds, true); vmInstance2.refreshInstanceView(); Assertions.assertEquals(PowerState.STOPPED, vmInstance2.powerState()); vmInstances.startInstances(instanceIds); vmInstance2.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vmInstance2.powerState()); vmInstances.restartInstances(instanceIds); vmInstance2.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vmInstance2.powerState()); vmInstances.deallocateInstances(instanceIds); vmInstance2.refreshInstanceView(); Assertions.assertEquals(PowerState.DEALLOCATED, vmInstance2.powerState()); VirtualMachineScaleSetVM vmInstance1 = vmss.virtualMachines().getInstance(instances.get(1).instanceId()); Assertions.assertEquals(PowerState.RUNNING, vmInstance1.powerState()); } @Test }
class VirtualMachineScaleSetOperationsTests extends ComputeManagementTest { private String rgName = ""; private final Region region = Region.US_WEST; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test public void canCreateVMSSWithPlan() { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); final String uname = "jvuser"; Network network = this .networkManager .networks() .define(generateRandomResourceName("vmssvnet", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); PurchasePlan plan = new PurchasePlan() .withName("access_server_byol") .withPublisher("openvpn") .withProduct("openvpnas"); ImageReference imageReference = new ImageReference() .withPublisher("openvpn") .withOffer("openvpnas") .withSku("access_server_byol") .withVersion("latest"); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withSpecificLinuxImageVersion(imageReference) .withRootUsername(uname) .withSsh(sshPublicKey()) .withNewDataDisk(1) .withPlan(plan) .create(); VirtualMachineScaleSet currentVirtualMachineScaleSet = this.computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); Assertions.assertEquals("access_server_byol", currentVirtualMachineScaleSet.plan().name()); 
Assertions.assertEquals("openvpn", currentVirtualMachineScaleSet.plan().publisher()); Assertions.assertEquals("openvpnas", currentVirtualMachineScaleSet.plan().product()); } @Test public void canUpdateVirtualMachineScaleSetWithExtensionProtectedSettings() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String uname = "jvuser"; final String password = password(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); StorageAccount storageAccount = this .storageManager .storageAccounts() .define(generateRandomResourceName("stg", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .create(); List<StorageAccountKey> keys = storageAccount.getKeys(); Assertions.assertNotNull(keys); Assertions.assertTrue(keys.size() > 0); String storageAccountKey = keys.get(0).value(); Network network = this .networkManager .networks() .define(generateRandomResourceName("vmssvnet", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(uname) .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withExistingStorageAccount(storageAccount) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("commandToExecute", "ls") 
.withProtectedSetting("storageAccountName", storageAccount.name()) .withProtectedSetting("storageAccountKey", storageAccountKey) .attach() .create(); Map<String, VirtualMachineScaleSetExtension> extensions = virtualMachineScaleSet.extensions(); Assertions.assertNotNull(extensions); Assertions.assertTrue(extensions.size() > 0); Assertions.assertTrue(extensions.containsKey("CustomScriptForLinux")); VirtualMachineScaleSetExtension extension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(extension.publicSettings()); Assertions.assertEquals(1, extension.publicSettings().size()); Assertions.assertNotNull(extension.publicSettingsAsJsonString()); VirtualMachineScaleSet scaleSet = this.computeManager.virtualMachineScaleSets().getById(virtualMachineScaleSet.id()); extensions = scaleSet.extensions(); Assertions.assertNotNull(extensions); Assertions.assertTrue(extensions.size() > 0); Assertions.assertTrue(extensions.containsKey("CustomScriptForLinux")); extension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(extension.publicSettings()); Assertions.assertEquals(1, extension.publicSettings().size()); Assertions.assertNotNull(extension.publicSettingsAsJsonString()); int newCapacity = scaleSet.capacity() + 1; virtualMachineScaleSet.update().withCapacity(newCapacity).apply(); extensions = virtualMachineScaleSet.extensions(); Assertions.assertNotNull(extensions); Assertions.assertTrue(extensions.size() > 0); Assertions.assertTrue(extensions.containsKey("CustomScriptForLinux")); extension = extensions.get("CustomScriptForLinux"); Assertions.assertNotNull(extension.publicSettings()); Assertions.assertEquals(1, extension.publicSettings().size()); Assertions.assertNotNull(extension.publicSettingsAsJsonString()); } @Test public void canCreateVirtualMachineScaleSetWithCustomScriptExtension() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String uname = "jvuser"; final String password = password(); 
ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define(generateRandomResourceName("vmssvnet", 15)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1"); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(uname) .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("commandToExecute", "ls") .attach() .withUpgradeMode(UpgradeMode.MANUAL) .create(); checkVMInstances(virtualMachineScaleSet); List<String> publicIPAddressIds = virtualMachineScaleSet.primaryPublicIpAddressIds(); PublicIpAddress publicIPAddress = this.networkManager.publicIpAddresses().getById(publicIPAddressIds.get(0)); String fqdn = publicIPAddress.fqdn(); Assertions.assertNotNull(fqdn); for (VirtualMachineScaleSetVM vm : virtualMachineScaleSet.virtualMachines().list()) { PagedIterable<VirtualMachineScaleSetNetworkInterface> networkInterfaces = vm.listNetworkInterfaces(); Assertions.assertEquals(TestUtilities.getSize(networkInterfaces), 1); VirtualMachineScaleSetNetworkInterface 
networkInterface = networkInterfaces.iterator().next(); VirtualMachineScaleSetNicIpConfiguration primaryIpConfig = null; primaryIpConfig = networkInterface.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Integer sshFrontendPort = null; List<LoadBalancerInboundNatRule> natRules = primaryIpConfig.listAssociatedLoadBalancerInboundNatRules(); for (LoadBalancerInboundNatRule natRule : natRules) { if (natRule.backendPort() == 22) { sshFrontendPort = natRule.frontendPort(); break; } } Assertions.assertNotNull(sshFrontendPort); } } @Test public void canCreateVirtualMachineScaleSetWithOptionalNetworkSettings() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String vmssVmDnsLabel = generateRandomResourceName("pip", 10); final String nsgName = generateRandomResourceName("nsg", 10); final String asgName = generateRandomResourceName("asg", 8); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); ApplicationSecurityGroup asg = this .networkManager .applicationSecurityGroups() .define(asgName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .create(); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withVirtualMachinePublicIp(vmssVmDnsLabel) .withExistingApplicationSecurityGroup(asg) .create(); 
VirtualMachineScaleSetPublicIpAddressConfiguration currentIpConfig = virtualMachineScaleSet.virtualMachinePublicIpConfig(); Assertions.assertNotNull(currentIpConfig); Assertions.assertNotNull(currentIpConfig.dnsSettings()); Assertions.assertNotNull(currentIpConfig.dnsSettings().domainNameLabel()); currentIpConfig.withIdleTimeoutInMinutes(20); virtualMachineScaleSet.update().withVirtualMachinePublicIp(currentIpConfig).apply(); currentIpConfig = virtualMachineScaleSet.virtualMachinePublicIpConfig(); Assertions.assertNotNull(currentIpConfig); Assertions.assertNotNull(currentIpConfig.idleTimeoutInMinutes()); Assertions.assertEquals((long) 20, (long) currentIpConfig.idleTimeoutInMinutes()); virtualMachineScaleSet.refresh(); currentIpConfig = virtualMachineScaleSet.virtualMachinePublicIpConfig(); Assertions.assertNotNull(currentIpConfig); Assertions.assertNotNull(currentIpConfig.idleTimeoutInMinutes()); Assertions.assertEquals((long) 20, (long) currentIpConfig.idleTimeoutInMinutes()); List<String> asgIds = virtualMachineScaleSet.applicationSecurityGroupIds(); Assertions.assertNotNull(asgIds); Assertions.assertEquals(1, asgIds.size()); NetworkSecurityGroup nsg = networkManager .networkSecurityGroups() .define(nsgName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .defineRule("rule1") .allowOutbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); virtualMachineScaleSet.deallocate(); virtualMachineScaleSet .update() .withIpForwarding() .withAcceleratedNetworking() .withExistingNetworkSecurityGroup(nsg) .apply(); Assertions.assertTrue(virtualMachineScaleSet.isIpForwardingEnabled()); Assertions.assertTrue(virtualMachineScaleSet.isAcceleratedNetworkingEnabled()); Assertions.assertNotNull(virtualMachineScaleSet.networkSecurityGroupId()); virtualMachineScaleSet.refresh(); Assertions.assertTrue(virtualMachineScaleSet.isIpForwardingEnabled()); 
Assertions.assertTrue(virtualMachineScaleSet.isAcceleratedNetworkingEnabled()); Assertions.assertNotNull(virtualMachineScaleSet.networkSecurityGroupId()); virtualMachineScaleSet .update() .withoutIpForwarding() .withoutAcceleratedNetworking() .withoutNetworkSecurityGroup() .apply(); Assertions.assertFalse(virtualMachineScaleSet.isIpForwardingEnabled()); Assertions.assertFalse(virtualMachineScaleSet.isAcceleratedNetworkingEnabled()); Assertions.assertNull(virtualMachineScaleSet.networkSecurityGroupId()); } @Test @Disabled("Mock framework doesn't support data plane") public void canCreateVirtualMachineScaleSetWithSecret() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); final String vaultName = generateRandomResourceName("vlt", 10); final String secretName = generateRandomResourceName("srt", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); Vault vault = this .keyVaultManager .vaults() .define(vaultName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .defineAccessPolicy() .forServicePrincipal(clientIdFromFile()) .allowSecretAllPermissions() .attach() .withDeploymentEnabled() .create(); final InputStream embeddedJsonConfig = this.getClass().getResourceAsStream("/myTest.txt"); String secretValue = IOUtils.toString(embeddedJsonConfig, StandardCharsets.UTF_8); Secret secret = vault.secrets().define(secretName).withValue(secretValue).create(); 
List<VaultCertificate> certs = new ArrayList<>(); certs.add(new VaultCertificate().withCertificateUrl(secret.id())); List<VaultSecretGroup> group = new ArrayList<>(); group .add( new VaultSecretGroup() .withSourceVault(new SubResource().withId(vault.id())) .withVaultCertificates(certs)); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSecrets(group) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg3", 15)) .withUpgradeMode(UpgradeMode.MANUAL) .create(); for (VirtualMachineScaleSetVM vm : virtualMachineScaleSet.virtualMachines().list()) { Assertions.assertTrue(vm.osProfile().secrets().size() > 0); } virtualMachineScaleSet.update().withoutSecrets().apply(); for (VirtualMachineScaleSetVM vm : virtualMachineScaleSet.virtualMachines().list()) { Assertions.assertTrue(vm.osProfile().secrets().size() == 0); } } @Test public void canCreateVirtualMachineScaleSet() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", 
LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .create(); Assertions.assertNull(virtualMachineScaleSet.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerBackends().size() == 2); Assertions .assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().size() == 2); Network primaryNetwork = virtualMachineScaleSet.getPrimaryNetwork(); Assertions.assertNotNull(primaryNetwork.id()); PagedIterable<VirtualMachineScaleSetNetworkInterface> nics = virtualMachineScaleSet.listNetworkInterfaces(); int nicCount = 0; for (VirtualMachineScaleSetNetworkInterface nic : nics) { nicCount++; Assertions.assertNotNull(nic.id()); Assertions 
.assertTrue(nic.virtualMachineId().toLowerCase().startsWith(virtualMachineScaleSet.id().toLowerCase())); Assertions.assertNotNull(nic.macAddress()); Assertions.assertNotNull(nic.dnsServers()); Assertions.assertNotNull(nic.appliedDnsServers()); Map<String, VirtualMachineScaleSetNicIpConfiguration> ipConfigs = nic.ipConfigurations(); Assertions.assertEquals(ipConfigs.size(), 1); for (Map.Entry<String, VirtualMachineScaleSetNicIpConfiguration> entry : ipConfigs.entrySet()) { VirtualMachineScaleSetNicIpConfiguration ipConfig = entry.getValue(); Assertions.assertNotNull(ipConfig); Assertions.assertTrue(ipConfig.isPrimary()); Assertions.assertNotNull(ipConfig.subnetName()); Assertions.assertTrue(primaryNetwork.id().toLowerCase().equalsIgnoreCase(ipConfig.networkId())); Assertions.assertNotNull(ipConfig.privateIpAddress()); Assertions.assertNotNull(ipConfig.privateIpAddressVersion()); Assertions.assertNotNull(ipConfig.privateIpAllocationMethod()); List<LoadBalancerBackend> lbBackends = ipConfig.listAssociatedLoadBalancerBackends(); Assertions.assertEquals(lbBackends.size(), 2); for (LoadBalancerBackend lbBackend : lbBackends) { Map<String, LoadBalancingRule> lbRules = lbBackend.loadBalancingRules(); Assertions.assertEquals(lbRules.size(), 1); for (Map.Entry<String, LoadBalancingRule> ruleEntry : lbRules.entrySet()) { LoadBalancingRule rule = ruleEntry.getValue(); Assertions.assertNotNull(rule); Assertions .assertTrue( (rule.frontendPort() == 80 && rule.backendPort() == 80) || (rule.frontendPort() == 443 && rule.backendPort() == 443)); } } List<LoadBalancerInboundNatRule> lbNatRules = ipConfig.listAssociatedLoadBalancerInboundNatRules(); Assertions.assertEquals(lbNatRules.size(), 2); for (LoadBalancerInboundNatRule lbNatRule : lbNatRules) { Assertions .assertTrue( (lbNatRule.frontendPort() >= 5000 && lbNatRule.frontendPort() <= 5099) || (lbNatRule.frontendPort() >= 6000 && lbNatRule.frontendPort() <= 6099)); Assertions.assertTrue(lbNatRule.backendPort() == 22 || 
lbNatRule.backendPort() == 23); } } } Assertions.assertTrue(nicCount > 0); Assertions.assertEquals(virtualMachineScaleSet.vhdContainers().size(), 2); Assertions.assertEquals(virtualMachineScaleSet.sku(), VirtualMachineScaleSetSkuTypes.STANDARD_A0); Assertions.assertTrue(virtualMachineScaleSet.upgradeModel() == UpgradeMode.AUTOMATIC); Assertions.assertEquals(virtualMachineScaleSet.capacity(), 2); primaryNetwork = virtualMachineScaleSet.getPrimaryNetwork(); String inboundNatPoolToRemove = null; for (String inboundNatPoolName : virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().keySet()) { inboundNatPoolToRemove = inboundNatPoolName; break; } LoadBalancer internalLoadBalancer = createInternalLoadBalancer(region, resourceGroup, primaryNetwork, "1"); virtualMachineScaleSet .update() .withExistingPrimaryInternalLoadBalancer(internalLoadBalancer) .withoutPrimaryInternetFacingLoadBalancerNatPools(inboundNatPoolToRemove) .apply(); virtualMachineScaleSet = this.computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerBackends().size() == 2); Assertions .assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().size() == 1); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerBackends().size() == 2); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerInboundNatPools().size() == 2); nics = virtualMachineScaleSet.listNetworkInterfaces(); nicCount = 0; for (VirtualMachineScaleSetNetworkInterface nic : nics) { nicCount++; Map<String, VirtualMachineScaleSetNicIpConfiguration> ipConfigs = nic.ipConfigurations(); Assertions.assertEquals(ipConfigs.size(), 1); for (Map.Entry<String, 
VirtualMachineScaleSetNicIpConfiguration> entry : ipConfigs.entrySet()) { VirtualMachineScaleSetNicIpConfiguration ipConfig = entry.getValue(); Assertions.assertNotNull(ipConfig); List<LoadBalancerBackend> lbBackends = ipConfig.listAssociatedLoadBalancerBackends(); Assertions.assertNotNull(lbBackends); for (LoadBalancerBackend lbBackend : lbBackends) { Map<String, LoadBalancingRule> lbRules = lbBackend.loadBalancingRules(); Assertions.assertEquals(lbRules.size(), 1); for (Map.Entry<String, LoadBalancingRule> ruleEntry : lbRules.entrySet()) { LoadBalancingRule rule = ruleEntry.getValue(); Assertions.assertNotNull(rule); Assertions .assertTrue( (rule.frontendPort() == 80 && rule.backendPort() == 80) || (rule.frontendPort() == 443 && rule.backendPort() == 443) || (rule.frontendPort() == 1000 && rule.backendPort() == 1000) || (rule.frontendPort() == 1001 && rule.backendPort() == 1001)); } } List<LoadBalancerInboundNatRule> lbNatRules = ipConfig.listAssociatedLoadBalancerInboundNatRules(); for (LoadBalancerInboundNatRule lbNatRule : lbNatRules) { Assertions .assertTrue( (lbNatRule.frontendPort() >= 6000 && lbNatRule.frontendPort() <= 6099) || (lbNatRule.frontendPort() >= 5000 && lbNatRule.frontendPort() <= 5099) || (lbNatRule.frontendPort() >= 8000 && lbNatRule.frontendPort() <= 8099) || (lbNatRule.frontendPort() >= 9000 && lbNatRule.frontendPort() <= 9099)); Assertions .assertTrue( lbNatRule.backendPort() == 23 || lbNatRule.backendPort() == 22 || lbNatRule.backendPort() == 44 || lbNatRule.backendPort() == 45); } } } Assertions.assertTrue(nicCount > 0); } /* * Previously name * canCreateTwoRegionalVirtualMachineScaleSetsAndAssociateEachWithDifferentBackendPoolOfZoneResilientLoadBalancer * but this was too long for some OSes and would cause git checkout to fail. 
*/ @Test public void canCreateTwoRegionalVMScaleSetsWithDifferentPoolOfZoneResilientLoadBalancer() throws Exception { Region region2 = Region.US_EAST2; ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region2).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region2, resourceGroup, "1", LoadBalancerSkuType.STANDARD); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); List<String> natpools = new ArrayList<>(); for (String natPool : publicLoadBalancer.inboundNatPools().keySet()) { natpools.add(natPool); } Assertions.assertTrue(natpools.size() == 2); final String vmssName1 = generateRandomResourceName("vmss1", 10); VirtualMachineScaleSet virtualMachineScaleSet1 = this .computeManager .virtualMachineScaleSets() .define(vmssName1) .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0)) .withPrimaryInternetFacingLoadBalancerInboundNatPools(natpools.get(0)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); final String vmssName2 = generateRandomResourceName("vmss2", 10); VirtualMachineScaleSet virtualMachineScaleSet2 = this .computeManager .virtualMachineScaleSets() .define(vmssName2) .withRegion(region2) .withExistingResourceGroup(resourceGroup) 
.withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(1)) .withPrimaryInternetFacingLoadBalancerInboundNatPools(natpools.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Assertions.assertNull(virtualMachineScaleSet1.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet1.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet1.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet1.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet1.listPrimaryInternetFacingLoadBalancerBackends().size() == 1); Assertions.assertNull(virtualMachineScaleSet2.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet2.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet2.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet2.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet2.listPrimaryInternetFacingLoadBalancerBackends().size() == 1); } @Test public void canCreateZoneRedundantVirtualMachineScaleSetWithZoneResilientLoadBalancer() throws Exception { Region region2 = Region.US_EAST2; ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region2).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = 
createInternetFacingLoadBalancer(region2, resourceGroup, "1", LoadBalancerSkuType.STANDARD); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); final String vmssName = generateRandomResourceName("vmss", 10); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region2) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withAvailabilityZone(AvailabilityZoneId.ZONE_1) .withAvailabilityZone(AvailabilityZoneId.ZONE_2) .create(); Assertions.assertNotNull(virtualMachineScaleSet.availabilityZones()); Assertions.assertEquals(2, virtualMachineScaleSet.availabilityZones().size()); Assertions.assertNull(virtualMachineScaleSet.getPrimaryInternalLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerBackends().size() == 0); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternalLoadBalancerInboundNatPools().size() == 0); Assertions.assertNotNull(virtualMachineScaleSet.getPrimaryInternetFacingLoadBalancer()); Assertions.assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerBackends().size() == 2); Assertions .assertTrue(virtualMachineScaleSet.listPrimaryInternetFacingLoadBalancerInboundNatPools().size() == 2); Network primaryNetwork = virtualMachineScaleSet.getPrimaryNetwork(); Assertions.assertNotNull(primaryNetwork.id()); } @Test public void canEnableMSIOnVirtualMachineScaleSetWithoutRoleAssignment() throws 
Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSystemAssignedManagedServiceIdentity() .create(); PagedIterable<RoleAssignment> rgRoleAssignments = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments); boolean found = false; for (RoleAssignment roleAssignment : rgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachineScaleSet.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertFalse( found, "Resource group should not have a role assignment with virtual machine scale set MSI principal"); } @Test public void 
canEnableMSIOnVirtualMachineScaleSetWithMultipleRoleAssignment() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); StorageAccount storageAccount = this .storageManager .storageAccounts() .define(generateRandomResourceName("jvcsrg", 10)) .withRegion(region) .withExistingResourceGroup(resourceGroup) .create(); VirtualMachineScaleSet virtualMachineScaleSet = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSystemAssignedManagedServiceIdentity() .withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole.CONTRIBUTOR) .withSystemAssignedIdentityBasedAccessTo(storageAccount.id(), BuiltInRole.CONTRIBUTOR) .create(); Assertions.assertNotNull(virtualMachineScaleSet.managedServiceIdentityType()); Assertions .assertTrue( virtualMachineScaleSet.managedServiceIdentityType().equals(ResourceIdentityType.SYSTEM_ASSIGNED)); 
PagedIterable<RoleAssignment> rgRoleAssignments = authorizationManager.roleAssignments().listByScope(resourceGroup.id()); Assertions.assertNotNull(rgRoleAssignments); boolean found = false; for (RoleAssignment roleAssignment : rgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachineScaleSet.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertTrue( found, "Resource group should have a role assignment with virtual machine scale set MSI principal"); PagedIterable<RoleAssignment> stgRoleAssignments = authorizationManager.roleAssignments().listByScope(storageAccount.id()); Assertions.assertNotNull(stgRoleAssignments); found = false; for (RoleAssignment roleAssignment : stgRoleAssignments) { if (roleAssignment.principalId() != null && roleAssignment .principalId() .equalsIgnoreCase(virtualMachineScaleSet.systemAssignedManagedServiceIdentityPrincipalId())) { found = true; break; } } Assertions .assertTrue( found, "Storage account should have a role assignment with virtual machine scale set MSI principal"); } @Test public void canGetSingleVMSSInstance() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.BASIC); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } Assertions.assertTrue(backends.size() == 2); this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) 
.withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg3", 15)) .withUpgradeMode(UpgradeMode.MANUAL) .create(); VirtualMachineScaleSet virtualMachineScaleSet = this.computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); VirtualMachineScaleSetVMs virtualMachineScaleSetVMs = virtualMachineScaleSet.virtualMachines(); VirtualMachineScaleSetVM firstVm = virtualMachineScaleSetVMs.list().iterator().next(); VirtualMachineScaleSetVM fetchedVm = virtualMachineScaleSetVMs.getInstance(firstVm.instanceId()); this.checkVmsEqual(firstVm, fetchedVm); VirtualMachineScaleSetVM fetchedAsyncVm = virtualMachineScaleSetVMs.getInstanceAsync(firstVm.instanceId()).block(); this.checkVmsEqual(firstVm, fetchedAsyncVm); } @Test public void canCreateLowPriorityVMSSInstance() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createInternetFacingLoadBalancer(region, resourceGroup, "1", LoadBalancerSkuType.STANDARD); List<String> backends = new ArrayList<>(); for (String backend : publicLoadBalancer.backends().keySet()) { backends.add(backend); } 
Assertions.assertTrue(backends.size() == 2); VirtualMachineScaleSet vmss = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withPrimaryInternetFacingLoadBalancerBackends(backends.get(0), backends.get(1)) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewStorageAccount(generateRandomResourceName("stg", 15)) .withNewStorageAccount(generateRandomResourceName("stg3", 15)) .withUpgradeMode(UpgradeMode.MANUAL) .withLowPriorityVirtualMachine(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(-1.0) .create(); Assertions.assertEquals(vmss.virtualMachinePriority(), VirtualMachinePriorityTypes.LOW); Assertions.assertEquals(vmss.virtualMachineEvictionPolicy(), VirtualMachineEvictionPolicyTypes.DEALLOCATE); Assertions.assertEquals(vmss.billingProfile().maxPrice(), (Double) (-1.0)); vmss.update().withMaxPrice(2000.0).apply(); Assertions.assertEquals(vmss.billingProfile().maxPrice(), (Double) 2000.0); } @Test public void canListInstancesIncludingInstanceView() { final String vmssName = generateRandomResourceName("vmss", 10); Network network = this.networkManager .networks() .define("vmssvnet") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); this.computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() 
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withUpgradeMode(UpgradeMode.MANUAL) .withCapacity(3) .create(); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets().getByResourceGroup(rgName, vmssName); List<VirtualMachineScaleSetVM> vmInstances = vmss.virtualMachines().list(null, VirtualMachineScaleSetVMExpandType.INSTANCE_VIEW).stream().collect(Collectors.toList()); Assertions.assertEquals(3, vmInstances.size()); List<PowerState> powerStates = vmInstances.stream().map(VirtualMachineScaleSetVM::powerState).collect(Collectors.toList()); Assertions.assertEquals(Arrays.asList(PowerState.RUNNING, PowerState.RUNNING, PowerState.RUNNING), powerStates); String firstInstanceId = vmInstances.get(0).instanceId(); computeManager.serviceClient().getVirtualMachineScaleSetVMs().deallocate(rgName, vmssName, firstInstanceId); vmInstances.get(0).refresh(); powerStates = vmInstances.stream().map(VirtualMachineScaleSetVM::powerState).collect(Collectors.toList()); Assertions.assertEquals(Arrays.asList(PowerState.DEALLOCATED, PowerState.RUNNING, PowerState.RUNNING), powerStates); VirtualMachineScaleSetVM vmInstance0 = vmss.virtualMachines().getInstance(firstInstanceId); Assertions.assertEquals(PowerState.DEALLOCATED, vmInstance0.powerState()); } @Test @Disabled("no longer works") public void canPerformSimulateEvictionOnSpotVMSSInstance() { final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups() .define(rgName) .withRegion(region) .create(); Network network = this.networkManager .networks() .define("vmssvnet") .withRegion(region) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) 
.withSku(VirtualMachineScaleSetSkuTypes.STANDARD_D3_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withSpotPriorityVirtualMachine(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .create(); PagedIterable<VirtualMachineScaleSetVM> vmInstances = vmss.virtualMachines().list(); for (VirtualMachineScaleSetVM instance : vmInstances) { Assertions.assertTrue(instance.osDiskSizeInGB() > 0); vmss.virtualMachines().simulateEviction(instance.instanceId()); } boolean deallocated = false; int pollIntervalInMinutes = 5; for (int i = 0; i < 30; i += pollIntervalInMinutes) { ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes)); deallocated = true; for (VirtualMachineScaleSetVM instance : vmInstances) { instance.refresh(); if (instance.powerState() != PowerState.DEALLOCATED) { deallocated = false; } } if (deallocated) { break; } } Assertions.assertTrue(deallocated); for (VirtualMachineScaleSetVM instance : vmInstances) { instance.refresh(); Assertions.assertEquals(0, instance.osDiskSizeInGB()); } } private void checkVmsEqual(VirtualMachineScaleSetVM original, VirtualMachineScaleSetVM fetched) { Assertions.assertEquals(original.administratorUserName(), fetched.administratorUserName()); Assertions.assertEquals(original.availabilitySetId(), fetched.availabilitySetId()); Assertions.assertEquals(original.bootDiagnosticEnabled(), fetched.bootDiagnosticEnabled()); Assertions.assertEquals(original.bootDiagnosticStorageAccountUri(), fetched.bootDiagnosticStorageAccountUri()); Assertions.assertEquals(original.computerName(), fetched.computerName()); Assertions.assertEquals(original.dataDisks().size(), fetched.dataDisks().size()); Assertions.assertEquals(original.extensions().size(), fetched.extensions().size()); Assertions.assertEquals(original.instanceId(), 
fetched.instanceId()); Assertions.assertEquals(original.isLatestScaleSetUpdateApplied(), fetched.isLatestScaleSetUpdateApplied()); Assertions.assertEquals(original.isLinuxPasswordAuthenticationEnabled(), fetched.isLinuxPasswordAuthenticationEnabled()); Assertions.assertEquals(original.isManagedDiskEnabled(), fetched.isManagedDiskEnabled()); Assertions.assertEquals(original.isOSBasedOnCustomImage(), fetched.isOSBasedOnCustomImage()); Assertions.assertEquals(original.isOSBasedOnPlatformImage(), fetched.isOSBasedOnPlatformImage()); Assertions.assertEquals(original.isOSBasedOnStoredImage(), fetched.isOSBasedOnStoredImage()); Assertions.assertEquals(original.isWindowsAutoUpdateEnabled(), fetched.isWindowsAutoUpdateEnabled()); Assertions.assertEquals(original.isWindowsVMAgentProvisioned(), original.isWindowsVMAgentProvisioned()); Assertions.assertEquals(original.networkInterfaceIds().size(), fetched.networkInterfaceIds().size()); Assertions.assertEquals(original.osDiskCachingType(), fetched.osDiskCachingType()); Assertions.assertEquals(original.osDiskId(), fetched.osDiskId()); Assertions.assertEquals(original.osDiskName(), fetched.osDiskName()); Assertions.assertEquals(original.osDiskSizeInGB(), fetched.osDiskSizeInGB()); Assertions.assertEquals(original.osType(), fetched.osType()); Assertions.assertEquals(original.osUnmanagedDiskVhdUri(), fetched.osUnmanagedDiskVhdUri()); Assertions.assertEquals(original.powerState(), fetched.powerState()); Assertions.assertEquals(original.primaryNetworkInterfaceId(), fetched.primaryNetworkInterfaceId()); Assertions.assertEquals(original.size(), fetched.size()); Assertions.assertEquals(original.sku().name(), fetched.sku().name()); Assertions.assertEquals(original.storedImageUnmanagedVhdUri(), fetched.storedImageUnmanagedVhdUri()); Assertions.assertEquals(original.unmanagedDataDisks().size(), fetched.unmanagedDataDisks().size()); Assertions.assertEquals(original.windowsTimeZone(), fetched.windowsTimeZone()); } private void 
checkVMInstances(VirtualMachineScaleSet vmScaleSet) { VirtualMachineScaleSetVMs virtualMachineScaleSetVMs = vmScaleSet.virtualMachines(); PagedIterable<VirtualMachineScaleSetVM> virtualMachines = virtualMachineScaleSetVMs.list(); Assertions.assertEquals(TestUtilities.getSize(virtualMachines), vmScaleSet.capacity()); Assertions.assertTrue(TestUtilities.getSize(virtualMachines) > 0); virtualMachineScaleSetVMs.updateInstances(virtualMachines.iterator().next().instanceId()); for (VirtualMachineScaleSetVM vm : virtualMachines) { Assertions.assertNotNull(vm.size()); Assertions.assertEquals(vm.osType(), OperatingSystemTypes.LINUX); Assertions.assertNotNull(vm.computerName().startsWith(vmScaleSet.computerNamePrefix())); Assertions.assertTrue(vm.isOSBasedOnPlatformImage()); Assertions.assertNull(vm.osDiskId()); Assertions.assertNotNull(vm.osUnmanagedDiskVhdUri()); Assertions.assertNull(vm.storedImageUnmanagedVhdUri()); Assertions.assertFalse(vm.isWindowsAutoUpdateEnabled()); Assertions.assertFalse(vm.isWindowsVMAgentProvisioned()); Assertions.assertTrue(vm.administratorUserName().equalsIgnoreCase("jvuser")); VirtualMachineImage vmImage = vm.getOSPlatformImage(); Assertions.assertNotNull(vmImage); Assertions.assertEquals(vm.extensions().size(), vmScaleSet.extensions().size()); Assertions.assertNotNull(vm.powerState()); vm.refreshInstanceView(); } VirtualMachineScaleSetVM virtualMachineScaleSetVM = virtualMachines.iterator().next(); Assertions.assertNotNull(virtualMachineScaleSetVM); virtualMachineScaleSetVM.restart(); virtualMachineScaleSetVM.powerOff(); virtualMachineScaleSetVM.refreshInstanceView(); Assertions.assertEquals(virtualMachineScaleSetVM.powerState(), PowerState.STOPPED); virtualMachineScaleSetVM.start(); for (VirtualMachineScaleSetVM vm : virtualMachines) { PagedIterable<VirtualMachineScaleSetNetworkInterface> nics = vmScaleSet.listNetworkInterfacesByInstanceId(vm.instanceId()); Assertions.assertNotNull(nics); Assertions.assertEquals(TestUtilities.getSize(nics), 
1); VirtualMachineScaleSetNetworkInterface nic = nics.iterator().next(); Assertions.assertNotNull(nic.virtualMachineId()); Assertions.assertTrue(nic.virtualMachineId().toLowerCase().equalsIgnoreCase(vm.id())); Assertions.assertNotNull(vm.listNetworkInterfaces()); VirtualMachineScaleSetNetworkInterface nicA = vmScaleSet.getNetworkInterfaceByInstanceId(vm.instanceId(), nic.name()); Assertions.assertNotNull(nicA); VirtualMachineScaleSetNetworkInterface nicB = vm.getNetworkInterface(nic.name()); String nicIdB = vm.getNetworkInterfaceAsync(nic.name()).map(n -> nic.primaryIPConfiguration().networkId()).block(); Assertions.assertNotNull(nicB); Assertions.assertNotNull(nicIdB); } } @Test public void testVirtualMachineScaleSetSkuTypes() { rgName = null; VirtualMachineScaleSetSkuTypes skuType = VirtualMachineScaleSetSkuTypes.STANDARD_A0; Assertions.assertNull(skuType.sku().capacity()); Sku sku1 = skuType.sku(); Assertions.assertNull(sku1.capacity()); sku1.withCapacity(1L); Assertions.assertEquals(sku1.capacity().longValue(), 1); Assertions.assertNull(skuType.sku().capacity()); Sku sku2 = skuType.sku(); Assertions.assertNull(sku2.capacity()); sku2.withCapacity(2L); Assertions.assertEquals(sku2.capacity().longValue(), 2); Assertions.assertNull(skuType.sku().capacity()); Assertions.assertEquals(sku1.capacity().longValue(), 1); } @Test public void canDeleteVMSSInstance() throws Exception { String euapRegion = "eastus2euap"; final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName) .withRegion(euapRegion) .create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet vmss = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) 
.withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(4) .create(); Assertions.assertEquals(4, vmss.virtualMachines().list().stream().count()); List<String> firstTwoIds = vmss.virtualMachines().list().stream() .limit(2) .map(VirtualMachineScaleSetVM::instanceId) .collect(Collectors.toList()); vmss.virtualMachines().deleteInstances(firstTwoIds, true); Assertions.assertEquals(2, vmss.virtualMachines().list().stream().count()); vmss.virtualMachines().deleteInstances(Collections.singleton(vmss.virtualMachines().list().stream().findFirst().get().instanceId()), false); Assertions.assertEquals(1, vmss.virtualMachines().list().stream().count()); computeManager.virtualMachineScaleSets().deleteInstances(rgName, vmssName, Collections.singleton(vmss.virtualMachines().list().stream().findFirst().get().instanceId()), false); } @Test public void canCreateFlexibleVMSS() throws Exception { VirtualMachineScaleSetInner options = new VirtualMachineScaleSetInner(); options.withOrchestrationMode(OrchestrationMode.FLEXIBLE) .withPlatformFaultDomainCount(1) .withLocation(region.name()); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName) .withRegion(region.name()) .create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region.name()) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); final String vmssName = generateRandomResourceName("vmss", 10); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet vmss = this .computeManager .virtualMachineScaleSets() 
.define(vmssName) .withRegion(region.name()) .withExistingResourceGroup(resourceGroup) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.CENTOS_8_3) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Assertions.assertNotNull(vmss.innerModel().virtualMachineProfile()); Assertions.assertNotNull(vmss.getPrimaryInternetFacingLoadBalancer()); Assertions.assertNotNull(vmss.getPrimaryNetwork()); Assertions.assertEquals(vmss.orchestrationMode(), OrchestrationMode.FLEXIBLE); } @Test public void canUpdateVMSSInCreateOrUpdateMode() throws Exception { String euapRegion = "eastus2euap"; final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName) .withRegion(euapRegion) .create(); VirtualMachineScaleSet vmss = this.computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withFlexibleOrchestrationMode() .create(); Assertions.assertEquals(vmss.orchestrationMode(), OrchestrationMode.FLEXIBLE); Assertions.assertNull(vmss.innerModel().virtualMachineProfile()); Assertions.assertNull(vmss.getPrimaryNetwork()); Assertions.assertNull(vmss.storageProfile()); Assertions.assertNull(vmss.networkProfile()); Assertions.assertNull(vmss.virtualMachinePublicIpConfig()); Assertions.assertEquals(vmss.applicationGatewayBackendAddressPoolsIds().size(), 0); Assertions.assertEquals(vmss.applicationSecurityGroupIds().size(), 0); Assertions.assertNull(vmss.billingProfile()); Assertions.assertNull(vmss.bootDiagnosticsStorageUri()); Assertions.assertNull(vmss.getPrimaryInternalLoadBalancer()); Assertions.assertEquals(vmss.vhdContainers().size(), 0); 
Assertions.assertEquals(vmss.listPrimaryInternalLoadBalancerBackends().size(), 0); Assertions.assertEquals(vmss.listPrimaryInternalLoadBalancerInboundNatPools().size(), 0); Assertions.assertEquals(vmss.listPrimaryInternetFacingLoadBalancerBackends().size(), 0); Assertions.assertEquals(vmss.listPrimaryInternetFacingLoadBalancerInboundNatPools().size(), 0); Assertions.assertEquals(vmss.primaryPublicIpAddressIds().size(), 0); Assertions.assertFalse(vmss.isAcceleratedNetworkingEnabled()); Assertions.assertFalse(vmss.isBootDiagnosticsEnabled()); Assertions.assertFalse(vmss.isIpForwardingEnabled()); Assertions.assertNull(vmss.networkSecurityGroupId()); Assertions.assertFalse(vmss.isManagedDiskEnabled()); vmss.update() .withTag("tag1", "value1") .apply(); Assertions.assertNotNull(vmss.tags()); Assertions.assertEquals(vmss.tags().get("tag1"), "value1"); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(Region.fromName(euapRegion), resourceGroup, "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); final String vmssVmDnsLabel = generateRandomResourceName("pip", 10); vmss = this.computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(1) .withVirtualMachinePublicIp(vmssVmDnsLabel) .create(); Assertions.assertNotNull(vmss.innerModel().virtualMachineProfile()); 
Assertions.assertEquals(vmss.orchestrationMode(), OrchestrationMode.FLEXIBLE); Assertions.assertNotNull(vmss.getPrimaryInternetFacingLoadBalancer()); Assertions.assertNotNull(vmss.getPrimaryNetwork()); vmss = this.computeManager .virtualMachineScaleSets() .getById(vmss.id()); Assertions.assertNotNull(vmss); vmss.update() .withTag("tag1", "value2") .apply(); Assertions.assertNotNull(vmss.innerModel().virtualMachineProfile()); Assertions.assertNotNull(vmss.tags()); Assertions.assertEquals(vmss.tags().get("tag1"), "value2"); Assertions.assertNotNull(vmss.getPrimaryNetwork()); Assertions.assertNotNull(vmss.storageProfile()); Assertions.assertNotNull(vmss.networkProfile()); Assertions.assertNotNull(vmss.virtualMachinePublicIpConfig()); Assertions.assertNotEquals(vmss.listPrimaryInternetFacingLoadBalancerBackends().size(), 0); Assertions.assertEquals(vmss.listPrimaryInternetFacingLoadBalancerInboundNatPools().size(), 0); Assertions.assertNotEquals(vmss.primaryPublicIpAddressIds().size(), 0); } @Test public void canGetOrchestrationType() { String euapRegion = "eastus2euap"; final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName) .withRegion(euapRegion) .create(); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet vmss = this .computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(1) .create(); 
Assertions.assertEquals(vmss.orchestrationMode(), OrchestrationMode.UNIFORM); final String vmssName2 = generateRandomResourceName("vmss", 10); VirtualMachineScaleSet vmss2 = this .computeManager .virtualMachineScaleSets() .define(vmssName2) .withRegion(euapRegion) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .create(); Assertions.assertNotNull(vmss2); Assertions.assertEquals(vmss2.orchestrationMode(), OrchestrationMode.FLEXIBLE); } @Test public void npeProtectionTest() throws Exception { String euapRegion = "eastus2euap"; final String vmssName = generateRandomResourceName("vmss", 10); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().define(rgName) .withRegion(euapRegion) .create(); VirtualMachineScaleSet vmss = this.computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(euapRegion) .withExistingResourceGroup(resourceGroup) .withFlexibleOrchestrationMode() .create(); String excludeMethodsString = "start,startAsync,reimage,reimageAsync,deallocate,deallocateAsync,powerOff,powerOffAsync,restart,restartAsync"; Set<String> excludeMethods = new HashSet<>(Arrays.asList(excludeMethodsString.split(","))); Set<String> invoked = new HashSet<>(); for (Method method : VirtualMachineScaleSet.class.getDeclaredMethods()) { if (!excludeMethods.contains(method.getName()) && method.getParameterCount() == 0) { method.invoke(vmss); invoked.add(method.getName()); } } Assertions.assertTrue(invoked.contains("isEphemeralOSDisk")); Assertions.assertFalse(invoked.contains("start")); } @Test public void canBatchOperateVMSSInstance() { final String vmssName = generateRandomResourceName("vmss", 10); Network network = this.networkManager .networks() .define("vmssvnet") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); VirtualMachineScaleSet vmss = this.computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) 
.withExistingResourceGroup(rgName) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withoutPrimaryInternetFacingLoadBalancer() .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withUpgradeMode(UpgradeMode.AUTOMATIC) .withCapacity(3) .create(); List<VirtualMachineScaleSetVM> instances = vmss.virtualMachines().list().stream().collect(Collectors.toList()); Assertions.assertEquals(3, instances.size()); Collection<String> instanceIds = Arrays.asList(instances.get(0).instanceId(), instances.get(2).instanceId()); VirtualMachineScaleSetVMs vmInstances = vmss.virtualMachines(); VirtualMachineScaleSetVM vmInstance2 = vmss.virtualMachines().getInstance(instances.get(2).instanceId()); vmInstances.redeployInstances(instanceIds); vmInstances.powerOffInstances(instanceIds, true); vmInstance2.refreshInstanceView(); Assertions.assertEquals(PowerState.STOPPED, vmInstance2.powerState()); vmInstances.startInstances(instanceIds); vmInstance2.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vmInstance2.powerState()); vmInstances.restartInstances(instanceIds); vmInstance2.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vmInstance2.powerState()); vmInstances.deallocateInstances(instanceIds); vmInstance2.refreshInstanceView(); Assertions.assertEquals(PowerState.DEALLOCATED, vmInstance2.powerState()); VirtualMachineScaleSetVM vmInstance1 = vmss.virtualMachines().getInstance(instances.get(1).instanceId()); Assertions.assertEquals(PowerState.RUNNING, vmInstance1.powerState()); } @Test }
Is making this field volatile or synchronized worth the bother? I don't think so — suggest documenting the decision instead: ```suggestion // Don't bother making this volatile or synchronized as this is very, very cheap to create. ```
/**
 * Converts the given {@link Member} to its name using the lazily created
 * {@code MemberNameConverterImpl} backed by this shim's {@code ObjectMapper}.
 *
 * @param member The member (field or method) to convert.
 * @return The converted member name as produced by the underlying converter.
 */
public String convertMemberName(Member member) {
    // Lazily initialized on first use. Don't bother making this volatile or synchronized
    // as this is very, very cheap to create — a benign race at worst constructs a
    // duplicate converter, which is harmless.
    if (memberNameConverter == null) {
        memberNameConverter = new MemberNameConverterImpl(mapper);
    }
    try {
        return memberNameConverter.convertMemberName(member);
    } catch (LinkageError ex) {
        // LinkageError here typically indicates a Jackson version/classpath mismatch;
        // rethrow with help information attached so the root cause is actionable.
        throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex));
    }
}
/**
 * Converts the given {@link Member} to its name using a lazily created
 * {@code MemberNameConverterImpl} backed by this shim's {@code ObjectMapper}.
 *
 * @param member The member (field or method) to convert.
 * @return The converted member name as produced by the underlying converter.
 */
public String convertMemberName(Member member) {
    // Race-tolerant lazy initialization: the converter is very cheap to create, so no
    // volatile/synchronization is used — a duplicate instance under contention is harmless.
    MemberNameConverterImpl converter = memberNameConverter;
    if (converter == null) {
        converter = new MemberNameConverterImpl(mapper);
        memberNameConverter = converter;
    }

    try {
        return converter.convertMemberName(member);
    } catch (LinkageError error) {
        // Attach Jackson version help info so classpath/version mismatches are diagnosable.
        throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), error));
    }
}
class ObjectMapperShim { private static final ClientLogger LOGGER = new ClientLogger(ObjectMapperShim.class); private static final int CACHE_SIZE_LIMIT = 10000; private static final Map<Type, JavaType> TYPE_TO_JAVA_TYPE_CACHE = new ConcurrentHashMap<>(); private static final Map<Type, MethodHandle> TYPE_TO_STRONGLY_TYPED_HEADERS_CONSTRUCTOR_CACHE = new ConcurrentHashMap<>(); private static final MethodHandle NO_CONSTRUCTOR_HANDLE = MethodHandles.identity(ObjectMapperShim.class); /** * Creates the JSON {@code ObjectMapper} capable of serializing azure.core types, with flattening and additional * properties support. * * @param innerMapperShim inner mapper to use for non-azure specific serialization. * @return Instance of shimmed {@code ObjectMapperShim}. */ public static ObjectMapperShim createJsonMapper(ObjectMapperShim innerMapperShim) { try { return new ObjectMapperShim(ObjectMapperFactory.INSTANCE.createJsonMapper(innerMapperShim.mapper)); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } /** * Creates and configures JSON {@code ObjectMapper} capable of serializing azure.core types, with flattening and * additional properties support. * * @param innerMapperShim inner mapper to use for non-azure specific serialization. * @param configure applies additional configuration to {@code ObjectMapper}. * @return Instance of shimmed {@code ObjectMapperShim}. */ public static ObjectMapperShim createJsonMapper(ObjectMapperShim innerMapperShim, BiConsumer<ObjectMapper, ObjectMapper> configure) { try { ObjectMapper mapper = ObjectMapperFactory.INSTANCE.createJsonMapper(innerMapperShim.mapper); configure.accept(mapper, innerMapperShim.mapper); return new ObjectMapperShim(mapper); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } /** * Creates and configures XML {@code ObjectMapper} capable of serializing azure.core types. 
* * @return Instance of shimmed {@code ObjectMapperShim}. */ public static ObjectMapperShim createXmlMapper() { try { ObjectMapper mapper = ObjectMapperFactory.INSTANCE.createXmlMapper(); return new ObjectMapperShim(mapper); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } /** * Creates and configures JSON {@code ObjectMapper}. * * @return Instance of shimmed {@code ObjectMapperShim}. */ public static ObjectMapperShim createSimpleMapper() { try { return new ObjectMapperShim(ObjectMapperFactory.INSTANCE.createSimpleMapper()); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } /** * Creates and configures JSON {@code ObjectMapper} for headers serialization. * * @return Instance of shimmed {@code ObjectMapperShim}. */ public static ObjectMapperShim createHeaderMapper() { try { ObjectMapper mapper = ObjectMapperFactory.INSTANCE.createHeaderMapper(); return new ObjectMapperShim(mapper); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } private final ObjectMapper mapper; private MemberNameConverterImpl memberNameConverter; public ObjectMapperShim(ObjectMapper mapper) { this.mapper = mapper; } /** * Serializes Java object as a string. * * @param value object to serialize. * @return Serialized string. * @throws IOException */ public String writeValueAsString(Object value) throws IOException { try { return mapper.writeValueAsString(value); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } /** * Serializes Java object as a byte array. * * @param value object to serialize. * @return Serialized byte array. 
* @throws IOException */ public byte[] writeValueAsBytes(Object value) throws IOException { try { return mapper.writeValueAsBytes(value); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } /** * Serializes Java object and write it to stream. * * @param out stream to write serialized object to. * @param value object to serialize. * @throws IOException */ public void writeValue(OutputStream out, Object value) throws IOException { try { mapper.writeValue(out, value); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } /** * Deserializes Java object from a string. * * @param content serialized object. * @param valueType type of the value. * @return Deserialized object. * @throws IOException */ public <T> T readValue(String content, final Type valueType) throws IOException { try { final JavaType javaType = createJavaType(valueType); return mapper.readValue(content, javaType); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } /** * Deserializes Java object from a byte array. * * @param src serialized object. * @param valueType type of the value. * @return Deserialized object. * @throws IOException */ public <T> T readValue(byte[] src, final Type valueType) throws IOException { try { final JavaType javaType = createJavaType(valueType); return mapper.readValue(src, javaType); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } /** * Reads and deserializes Java object from a stream. * * @param src serialized object. * @param valueType type of the value. * @return Deserialized object. 
* @throws IOException */ public <T> T readValue(InputStream src, final Type valueType) throws IOException { try { final JavaType javaType = createJavaType(valueType); return mapper.readValue(src, javaType); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } /** * Reads JSON tree from string. * * @param content serialized JSON tree. * @return {@code JsonNode} instance * @throws IOException */ public JsonNode readTree(String content) throws IOException { try { return mapper.readTree(content); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } /** * Reads JSON tree from byte array. * * @param content serialized JSON tree. * @return {@code JsonNode} instance */ public JsonNode readTree(byte[] content) throws IOException { try { return mapper.readTree(content); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } private JavaType createJavaType(Type type) { if (type == null) { return null; } else if (type instanceof JavaType) { return (JavaType) type; } else if (type instanceof ParameterizedType) { final ParameterizedType parameterizedType = (ParameterizedType) type; final Type[] actualTypeArguments = parameterizedType.getActualTypeArguments(); JavaType[] javaTypeArguments = new JavaType[actualTypeArguments.length]; for (int i = 0; i != actualTypeArguments.length; i++) { javaTypeArguments[i] = createJavaType(actualTypeArguments[i]); } return getFromTypeCache(type, t -> mapper.getTypeFactory() .constructParametricType((Class<?>) parameterizedType.getRawType(), javaTypeArguments)); } else { return getFromTypeCache(type, t -> mapper.getTypeFactory().constructType(t)); } } @SuppressWarnings("unchecked") public <T> T deserialize(HttpHeaders headers, Type deserializedHeadersType) throws IOException { if (deserializedHeadersType == null) { return null; } try { MethodHandle 
constructor = getFromHeadersConstructorCache(deserializedHeadersType); if (constructor != NO_CONSTRUCTOR_HANDLE) { return (T) constructor.invokeWithArguments(headers); } } catch (Throwable throwable) { if (throwable instanceof Error) { throw (Error) throwable; } if (throwable instanceof RuntimeException) { throw (RuntimeException) throwable; } LOGGER.verbose("Failed to find or use MethodHandle Constructor that accepts HttpHeaders for " + deserializedHeadersType + "."); } T deserializedHeaders = mapper.convertValue(headers, createJavaType(deserializedHeadersType)); final Class<?> deserializedHeadersClass = TypeUtil.getRawClass(deserializedHeadersType); final Field[] declaredFields = deserializedHeadersClass.getDeclaredFields(); /* * A list containing all handlers for header collections of the header type. */ final List<HeaderCollectionHandler> headerCollectionHandlers = new ArrayList<>(); /* * This set is an optimization where we track the first character of all HeaderCollections defined on the * deserialized headers type. This allows us to optimize away startWiths checks which are much more costly than * getting the first character. */ final Set<Character> headerCollectionsFirstCharacters = new HashSet<>(); /* * Begin by looping over all declared fields and initializing all header collection information. 
*/ for (final Field declaredField : declaredFields) { if (!declaredField.isAnnotationPresent(HeaderCollection.class)) { continue; } final Type declaredFieldType = declaredField.getGenericType(); if (!TypeUtil.isTypeOrSubTypeOf(declaredField.getType(), Map.class)) { continue; } final Type[] mapTypeArguments = TypeUtil.getTypeArguments(declaredFieldType); if (mapTypeArguments.length != 2 || mapTypeArguments[0] != String.class || mapTypeArguments[1] != String.class) { continue; } final HeaderCollection headerCollectionAnnotation = declaredField.getAnnotation(HeaderCollection.class); final String headerCollectionPrefix = headerCollectionAnnotation.value().toLowerCase(Locale.ROOT); final int headerCollectionPrefixLength = headerCollectionPrefix.length(); if (headerCollectionPrefixLength == 0) { continue; } headerCollectionHandlers.add(new HeaderCollectionHandler(headerCollectionPrefix, declaredField)); headerCollectionsFirstCharacters.add(headerCollectionPrefix.charAt(0)); } /* * Then loop over all headers and check if they begin with any of the prefixes found. */ for (final HttpHeader header : headers) { String headerNameLower = header.getName().toLowerCase(Locale.ROOT); /* * Optimization to skip this header as it doesn't begin with any character starting header collections in * the deserialized headers type. */ if (!headerCollectionsFirstCharacters.contains(headerNameLower.charAt(0))) { continue; } for (HeaderCollectionHandler headerCollectionHandler : headerCollectionHandlers) { if (headerCollectionHandler.headerStartsWithPrefix(headerNameLower)) { headerCollectionHandler.addHeader(header.getName(), header.getValue()); } } } /* * Finally, inject all found header collection values into the deserialized headers. 
*/ headerCollectionHandlers.forEach(h -> h.injectValuesIntoDeclaringField(deserializedHeaders, LOGGER)); return deserializedHeaders; } public <T extends JsonNode> T valueToTree(Object fromValue) { try { return mapper.valueToTree(fromValue); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } /* * Helper methods that gets the value for the given key from the cache. */ private static JavaType getFromTypeCache(Type key, Function<Type, JavaType> compute) { if (TYPE_TO_JAVA_TYPE_CACHE.size() >= CACHE_SIZE_LIMIT) { TYPE_TO_JAVA_TYPE_CACHE.clear(); } return TYPE_TO_JAVA_TYPE_CACHE.computeIfAbsent(key, compute); } private static MethodHandle getFromHeadersConstructorCache(Type key) { if (TYPE_TO_STRONGLY_TYPED_HEADERS_CONSTRUCTOR_CACHE.size() >= CACHE_SIZE_LIMIT) { TYPE_TO_STRONGLY_TYPED_HEADERS_CONSTRUCTOR_CACHE.clear(); } return TYPE_TO_STRONGLY_TYPED_HEADERS_CONSTRUCTOR_CACHE.computeIfAbsent(key, type -> { try { Class<?> headersClass = TypeUtil.getRawClass(type); MethodHandles.Lookup lookup = ReflectionUtils.getLookupToUse(headersClass); return lookup.unreflectConstructor(headersClass.getDeclaredConstructor(HttpHeaders.class)); } catch (Throwable throwable) { if (throwable instanceof Error) { throw (Error) throwable; } return NO_CONSTRUCTOR_HANDLE; } }); } }
class ObjectMapperShim { private static final ClientLogger LOGGER = new ClientLogger(ObjectMapperShim.class); private static final int CACHE_SIZE_LIMIT = 10000; private static final Map<Type, JavaType> TYPE_TO_JAVA_TYPE_CACHE = new ConcurrentHashMap<>(); private static final Map<Type, MethodHandle> TYPE_TO_STRONGLY_TYPED_HEADERS_CONSTRUCTOR_CACHE = new ConcurrentHashMap<>(); private static final MethodHandle NO_CONSTRUCTOR_HANDLE = MethodHandles.identity(ObjectMapperShim.class); /** * Creates the JSON {@code ObjectMapper} capable of serializing azure.core types, with flattening and additional * properties support. * * @param innerMapperShim inner mapper to use for non-azure specific serialization. * @return Instance of shimmed {@code ObjectMapperShim}. */ public static ObjectMapperShim createJsonMapper(ObjectMapperShim innerMapperShim) { try { return new ObjectMapperShim(ObjectMapperFactory.INSTANCE.createJsonMapper(innerMapperShim.mapper)); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } /** * Creates and configures JSON {@code ObjectMapper} capable of serializing azure.core types, with flattening and * additional properties support. * * @param innerMapperShim inner mapper to use for non-azure specific serialization. * @param configure applies additional configuration to {@code ObjectMapper}. * @return Instance of shimmed {@code ObjectMapperShim}. */ public static ObjectMapperShim createJsonMapper(ObjectMapperShim innerMapperShim, BiConsumer<ObjectMapper, ObjectMapper> configure) { try { ObjectMapper mapper = ObjectMapperFactory.INSTANCE.createJsonMapper(innerMapperShim.mapper); configure.accept(mapper, innerMapperShim.mapper); return new ObjectMapperShim(mapper); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } /** * Creates and configures XML {@code ObjectMapper} capable of serializing azure.core types. 
* * @return Instance of shimmed {@code ObjectMapperShim}. */ public static ObjectMapperShim createXmlMapper() { try { ObjectMapper mapper = ObjectMapperFactory.INSTANCE.createXmlMapper(); return new ObjectMapperShim(mapper); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } /** * Creates and configures JSON {@code ObjectMapper}. * * @return Instance of shimmed {@code ObjectMapperShim}. */ public static ObjectMapperShim createSimpleMapper() { try { return new ObjectMapperShim(ObjectMapperFactory.INSTANCE.createSimpleMapper()); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } /** * Creates and configures JSON {@code ObjectMapper} for headers serialization. * * @return Instance of shimmed {@code ObjectMapperShim}. */ public static ObjectMapperShim createHeaderMapper() { try { ObjectMapper mapper = ObjectMapperFactory.INSTANCE.createHeaderMapper(); return new ObjectMapperShim(mapper); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } private final ObjectMapper mapper; private MemberNameConverterImpl memberNameConverter; public ObjectMapperShim(ObjectMapper mapper) { this.mapper = mapper; } /** * Serializes Java object as a string. * * @param value object to serialize. * @return Serialized string. * @throws IOException */ public String writeValueAsString(Object value) throws IOException { try { return mapper.writeValueAsString(value); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } /** * Serializes Java object as a byte array. * * @param value object to serialize. * @return Serialized byte array. 
* @throws IOException */ public byte[] writeValueAsBytes(Object value) throws IOException { try { return mapper.writeValueAsBytes(value); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } /** * Serializes Java object and write it to stream. * * @param out stream to write serialized object to. * @param value object to serialize. * @throws IOException */ public void writeValue(OutputStream out, Object value) throws IOException { try { mapper.writeValue(out, value); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } /** * Deserializes Java object from a string. * * @param content serialized object. * @param valueType type of the value. * @return Deserialized object. * @throws IOException */ public <T> T readValue(String content, final Type valueType) throws IOException { try { final JavaType javaType = createJavaType(valueType); return mapper.readValue(content, javaType); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } /** * Deserializes Java object from a byte array. * * @param src serialized object. * @param valueType type of the value. * @return Deserialized object. * @throws IOException */ public <T> T readValue(byte[] src, final Type valueType) throws IOException { try { final JavaType javaType = createJavaType(valueType); return mapper.readValue(src, javaType); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } /** * Reads and deserializes Java object from a stream. * * @param src serialized object. * @param valueType type of the value. * @return Deserialized object. 
* @throws IOException */ public <T> T readValue(InputStream src, final Type valueType) throws IOException { try { final JavaType javaType = createJavaType(valueType); return mapper.readValue(src, javaType); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } /** * Reads JSON tree from string. * * @param content serialized JSON tree. * @return {@code JsonNode} instance * @throws IOException */ public JsonNode readTree(String content) throws IOException { try { return mapper.readTree(content); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } /** * Reads JSON tree from byte array. * * @param content serialized JSON tree. * @return {@code JsonNode} instance */ public JsonNode readTree(byte[] content) throws IOException { try { return mapper.readTree(content); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } private JavaType createJavaType(Type type) { if (type == null) { return null; } else if (type instanceof JavaType) { return (JavaType) type; } else if (type instanceof ParameterizedType) { final ParameterizedType parameterizedType = (ParameterizedType) type; final Type[] actualTypeArguments = parameterizedType.getActualTypeArguments(); JavaType[] javaTypeArguments = new JavaType[actualTypeArguments.length]; for (int i = 0; i != actualTypeArguments.length; i++) { javaTypeArguments[i] = createJavaType(actualTypeArguments[i]); } return getFromTypeCache(type, t -> mapper.getTypeFactory() .constructParametricType((Class<?>) parameterizedType.getRawType(), javaTypeArguments)); } else { return getFromTypeCache(type, t -> mapper.getTypeFactory().constructType(t)); } } @SuppressWarnings("unchecked") public <T> T deserialize(HttpHeaders headers, Type deserializedHeadersType) throws IOException { if (deserializedHeadersType == null) { return null; } try { MethodHandle 
constructor = getFromHeadersConstructorCache(deserializedHeadersType); if (constructor != NO_CONSTRUCTOR_HANDLE) { return (T) constructor.invokeWithArguments(headers); } } catch (Throwable throwable) { if (throwable instanceof Error) { throw (Error) throwable; } if (throwable instanceof RuntimeException) { throw (RuntimeException) throwable; } LOGGER.verbose("Failed to find or use MethodHandle Constructor that accepts HttpHeaders for " + deserializedHeadersType + "."); } T deserializedHeaders = mapper.convertValue(headers, createJavaType(deserializedHeadersType)); final Class<?> deserializedHeadersClass = TypeUtil.getRawClass(deserializedHeadersType); final Field[] declaredFields = deserializedHeadersClass.getDeclaredFields(); /* * A list containing all handlers for header collections of the header type. */ final List<HeaderCollectionHandler> headerCollectionHandlers = new ArrayList<>(); /* * This set is an optimization where we track the first character of all HeaderCollections defined on the * deserialized headers type. This allows us to optimize away startWiths checks which are much more costly than * getting the first character. */ final Set<Character> headerCollectionsFirstCharacters = new HashSet<>(); /* * Begin by looping over all declared fields and initializing all header collection information. 
*/ for (final Field declaredField : declaredFields) { if (!declaredField.isAnnotationPresent(HeaderCollection.class)) { continue; } final Type declaredFieldType = declaredField.getGenericType(); if (!TypeUtil.isTypeOrSubTypeOf(declaredField.getType(), Map.class)) { continue; } final Type[] mapTypeArguments = TypeUtil.getTypeArguments(declaredFieldType); if (mapTypeArguments.length != 2 || mapTypeArguments[0] != String.class || mapTypeArguments[1] != String.class) { continue; } final HeaderCollection headerCollectionAnnotation = declaredField.getAnnotation(HeaderCollection.class); final String headerCollectionPrefix = headerCollectionAnnotation.value().toLowerCase(Locale.ROOT); final int headerCollectionPrefixLength = headerCollectionPrefix.length(); if (headerCollectionPrefixLength == 0) { continue; } headerCollectionHandlers.add(new HeaderCollectionHandler(headerCollectionPrefix, declaredField)); headerCollectionsFirstCharacters.add(headerCollectionPrefix.charAt(0)); } /* * Then loop over all headers and check if they begin with any of the prefixes found. */ for (final HttpHeader header : headers) { String headerNameLower = header.getName().toLowerCase(Locale.ROOT); /* * Optimization to skip this header as it doesn't begin with any character starting header collections in * the deserialized headers type. */ if (!headerCollectionsFirstCharacters.contains(headerNameLower.charAt(0))) { continue; } for (HeaderCollectionHandler headerCollectionHandler : headerCollectionHandlers) { if (headerCollectionHandler.headerStartsWithPrefix(headerNameLower)) { headerCollectionHandler.addHeader(header.getName(), header.getValue()); } } } /* * Finally, inject all found header collection values into the deserialized headers. 
*/ headerCollectionHandlers.forEach(h -> h.injectValuesIntoDeclaringField(deserializedHeaders, LOGGER)); return deserializedHeaders; } public <T extends JsonNode> T valueToTree(Object fromValue) { try { return mapper.valueToTree(fromValue); } catch (LinkageError ex) { throw LOGGER.logThrowableAsError(new LinkageError(JacksonVersion.getHelpInfo(), ex)); } } /* * Helper methods that gets the value for the given key from the cache. */ private static JavaType getFromTypeCache(Type key, Function<Type, JavaType> compute) { if (TYPE_TO_JAVA_TYPE_CACHE.size() >= CACHE_SIZE_LIMIT) { TYPE_TO_JAVA_TYPE_CACHE.clear(); } return TYPE_TO_JAVA_TYPE_CACHE.computeIfAbsent(key, compute); } private static MethodHandle getFromHeadersConstructorCache(Type key) { if (TYPE_TO_STRONGLY_TYPED_HEADERS_CONSTRUCTOR_CACHE.size() >= CACHE_SIZE_LIMIT) { TYPE_TO_STRONGLY_TYPED_HEADERS_CONSTRUCTOR_CACHE.clear(); } return TYPE_TO_STRONGLY_TYPED_HEADERS_CONSTRUCTOR_CACHE.computeIfAbsent(key, type -> { try { Class<?> headersClass = TypeUtil.getRawClass(type); MethodHandles.Lookup lookup = ReflectionUtils.getLookupToUse(headersClass); return lookup.unreflectConstructor(headersClass.getDeclaredConstructor(HttpHeaders.class)); } catch (Throwable throwable) { if (throwable instanceof Error) { throw (Error) throwable; } return NO_CONSTRUCTOR_HANDLE; } }); } }
I assume what you are saying if on the ServiceEndpoint we had successful requests very recently let's throw away the failed channel rather faster because chances are good that other channels are healthy?
private String transitTimeoutValidation(Timestamps timestamps, Instant currentTime, RntbdRequestManager requestManager, Channel channel) { String transitTimeoutValidationMessage = StringUtils.EMPTY; if (this.timeoutDetectionEnabled && timestamps.transitTimeoutCount() > 0) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); long readDelay = Duration.between(timestamps.lastChannelReadTime(), currentTime).toNanos(); if (readDelay >= this.timeoutTimeLimitInNanos) { transitTimeoutValidationMessage = MessageFormat.format( "{0} health check failed due to transit timeout detection time limit: [rntbdContext: {1}," + "lastChannelRead: {2}, timeoutTimeLimitInNanos: {3}]", channel, rntbdContext, timestamps.lastReadTime, this.timeoutTimeLimitInNanos); logger.warn(transitTimeoutValidationMessage); return transitTimeoutValidationMessage; } if (timestamps.transitTimeoutCount() >= this.timeoutHighFrequencyThreshold && readDelay >= this.timeoutHighFrequencyTimeLimitInNanos) { transitTimeoutValidationMessage = MessageFormat.format( "{0} health check failed due to transit timeout high frequency threshold hit: [rntbdContext: {1}," + "lastChannelRead: {2}, transitTimeoutCount: {3}, timeoutHighFrequencyThreshold: {4}, timeoutHighFrequencyTimeLimitInNanos: {5}]", channel, rntbdContext, timestamps.lastReadTime, timestamps.transitTimeoutCount, this.timeoutHighFrequencyThreshold, this.timeoutHighFrequencyTimeLimitInNanos); logger.warn(transitTimeoutValidationMessage); return transitTimeoutValidationMessage; } if (timestamps.tansitTimeoutWriteCount() >= this.timeoutOnWriteThreshold && readDelay >= this.timeoutOnWriteTimeLimitInNanos) { transitTimeoutValidationMessage = MessageFormat.format( "{0} health check failed due to transit timeout on write threshold hit: [rntbdContext: {1}," + "lastChannelRead: {2}, transitTimeoutWriteCount: {3}, timeoutOnWriteThreshold: {4}, timeoutOnWriteTimeLimitInNanos: {5}]", channel, rntbdContext, timestamps.lastReadTime, 
timestamps.transitTimeoutWriteCount, this.timeoutOnWriteThreshold, this.timeoutOnWriteTimeLimitInNanos); logger.warn(transitTimeoutValidationMessage); return transitTimeoutValidationMessage; } } return transitTimeoutValidationMessage; }
private String transitTimeoutValidation(Timestamps timestamps, Instant currentTime, RntbdRequestManager requestManager, Channel channel) { String transitTimeoutValidationMessage = StringUtils.EMPTY; if (this.timeoutDetectionEnabled && timestamps.transitTimeoutCount() > 0) { if (CpuMemoryMonitor.getCpuLoad().isCpuOverThreshold(this.timeoutDetectionDisableCPUThreshold)) { return transitTimeoutValidationMessage; } final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); long readDelay = Duration.between(timestamps.lastChannelReadTime(), currentTime).toNanos(); if (readDelay >= this.timeoutTimeLimitInNanos) { transitTimeoutValidationMessage = MessageFormat.format( "{0} health check failed due to transit timeout detection time limit: [rntbdContext: {1}," + "lastChannelRead: {2}, timeoutTimeLimitInNanos: {3}]", channel, rntbdContext, timestamps.lastReadTime, this.timeoutTimeLimitInNanos); logger.warn(transitTimeoutValidationMessage); return transitTimeoutValidationMessage; } if (timestamps.transitTimeoutCount() >= this.timeoutHighFrequencyThreshold && readDelay >= this.timeoutHighFrequencyTimeLimitInNanos) { transitTimeoutValidationMessage = MessageFormat.format( "{0} health check failed due to transit timeout high frequency threshold hit: [rntbdContext: {1}," + "lastChannelRead: {2}, transitTimeoutCount: {3}, timeoutHighFrequencyThreshold: {4}, timeoutHighFrequencyTimeLimitInNanos: {5}]", channel, rntbdContext, timestamps.lastReadTime, timestamps.transitTimeoutCount, this.timeoutHighFrequencyThreshold, this.timeoutHighFrequencyTimeLimitInNanos); logger.warn(transitTimeoutValidationMessage); return transitTimeoutValidationMessage; } if (timestamps.tansitTimeoutWriteCount() >= this.timeoutOnWriteThreshold && readDelay >= this.timeoutOnWriteTimeLimitInNanos) { transitTimeoutValidationMessage = MessageFormat.format( "{0} health check failed due to transit timeout on write threshold hit: [rntbdContext: {1}," + "lastChannelRead: {2}, 
transitTimeoutWriteCount: {3}, timeoutOnWriteThreshold: {4}, timeoutOnWriteTimeLimitInNanos: {5}]", channel, rntbdContext, timestamps.lastReadTime, timestamps.transitTimeoutWriteCount, this.timeoutOnWriteThreshold, this.timeoutOnWriteTimeLimitInNanos); logger.warn(transitTimeoutValidationMessage); return transitTimeoutValidationMessage; } } return transitTimeoutValidationMessage; }
class RntbdClientChannelHealthChecker implements ChannelHealthChecker { private static final Logger logger = LoggerFactory.getLogger(RntbdClientChannelHealthChecker.class); private static final long recentReadWindowInNanos = 1_000_000_000L; private static final long readHangGracePeriodInNanos = (45L + 10L) * 1_000_000_000L; private static final long writeHangGracePeriodInNanos = 2L * 1_000_000_000L; @JsonProperty private final long idleConnectionTimeoutInNanos; @JsonProperty private final long readDelayLimitInNanos; @JsonProperty private final long writeDelayLimitInNanos; @JsonProperty private final long networkRequestTimeoutInNanos; @JsonProperty private final boolean timeoutDetectionEnabled; @JsonProperty private final long timeoutTimeLimitInNanos; @JsonProperty private final int timeoutHighFrequencyThreshold; @JsonProperty private final long timeoutHighFrequencyTimeLimitInNanos; @JsonProperty private final int timeoutOnWriteThreshold; @JsonProperty private final long timeoutOnWriteTimeLimitInNanos; public RntbdClientChannelHealthChecker(final Config config) { checkNotNull(config, "expected non-null config"); checkArgument(config.receiveHangDetectionTimeInNanos() > readHangGracePeriodInNanos, "config.receiveHangDetectionTimeInNanos: %s", config.receiveHangDetectionTimeInNanos()); checkArgument(config.sendHangDetectionTimeInNanos() > writeHangGracePeriodInNanos, "config.sendHangDetectionTimeInNanos: %s", config.sendHangDetectionTimeInNanos()); this.idleConnectionTimeoutInNanos = config.idleConnectionTimeoutInNanos(); this.readDelayLimitInNanos = config.receiveHangDetectionTimeInNanos(); this.writeDelayLimitInNanos = config.sendHangDetectionTimeInNanos(); this.networkRequestTimeoutInNanos = config.tcpNetworkRequestTimeoutInNanos(); this.timeoutDetectionEnabled = config.timeoutDetectionEnabled(); this.timeoutTimeLimitInNanos = config.timeoutDetectionTimeLimitInNanos(); this.timeoutHighFrequencyThreshold = config.timeoutDetectionHighFrequencyThreshold(); 
this.timeoutHighFrequencyTimeLimitInNanos = config.timeoutDetectionHighFrequencyTimeLimitInNanos(); this.timeoutOnWriteThreshold = config.timeoutDetectionOnWriteThreshold(); this.timeoutOnWriteTimeLimitInNanos = config.timeoutDetectionOnWriteTimeLimitInNanos(); } /** * Returns the idle connection timeout interval in nanoseconds. * <p> * A channel is considered idle if {@link * the last channel read is greater than {@link * * @return Idle connection timeout interval in nanoseconds. */ public long idleConnectionTimeoutInNanos() { return this.idleConnectionTimeoutInNanos; } /** * Returns the read delay limit in nanoseconds. * <p> * A channel will be declared unhealthy if the gap between the last channel write and the last channel read grows * beyond this value. * <p> * Constraint: {@link * * @return Read delay limit in nanoseconds. */ public long readDelayLimitInNanos() { return this.readDelayLimitInNanos; } /** * Returns the write delay limit in nanoseconds. * <p> * A channel will be declared unhealthy if the gap between the last channel write attempt and the last channel write * grows beyond this value. * <p> * Constraint: {@link * * @return Write delay limit in nanoseconds. */ public long writeDelayLimitInNanos() { return this.writeDelayLimitInNanos; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise. 
*/ public Future<Boolean> isHealthy(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final Promise<Boolean> promise = channel.eventLoop().newPromise(); this.isHealthyWithFailureReason(channel) .addListener((Future<String> future) -> { if (future.isSuccess()) { if (RntbdHealthCheckResults.SuccessValue.equals(future.get())) { promise.setSuccess(Boolean.TRUE); } else { promise.setSuccess(Boolean.FALSE); } } else { promise.setFailure(future.cause()); } }); return promise; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result reason {@link RntbdHealthCheckResults} if the channel is healthy, otherwise return the failed reason. */ public Future<String> isHealthyWithFailureReason(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class); final Promise<String> promise = channel.eventLoop().newPromise(); if (requestManager == null) { reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager"); return promise.setSuccess("active with no request manager"); } final Timestamps timestamps = requestManager.snapshotTimestamps(); final Instant currentTime = Instant.now(); if (Duration.between(timestamps.lastChannelReadTime(), currentTime).toNanos() < recentReadWindowInNanos) { return promise.setSuccess(RntbdHealthCheckResults.SuccessValue); } String writeIsHangMessage = this.isWriteHang(timestamps, currentTime, requestManager, channel); if (StringUtils.isNotEmpty(writeIsHangMessage)) { return promise.setSuccess(writeIsHangMessage); } String readIsHangMessage = this.isReadHang(timestamps, currentTime, requestManager, channel); if (StringUtils.isNotEmpty(readIsHangMessage)) { return promise.setSuccess(readIsHangMessage); } String transitTimeoutValidationMessage = this.transitTimeoutValidation(timestamps, currentTime, 
requestManager, channel); if (StringUtils.isNotEmpty(transitTimeoutValidationMessage)) { return promise.setSuccess(transitTimeoutValidationMessage); } String idleConnectionValidationMessage = this.idleConnectionValidation(timestamps, currentTime, channel); if(StringUtils.isNotEmpty(idleConnectionValidationMessage)) { return promise.setSuccess(idleConnectionValidationMessage); } channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> { if (completed.isSuccess()) { promise.setSuccess(RntbdHealthCheckResults.SuccessValue); } else { String msg = MessageFormat.format( "{0} health check request failed due to: {1}", channel, completed.cause().toString() ); logger.warn(msg); promise.setSuccess(msg); } }); return promise; } private String isWriteHang(Timestamps timestamps, Instant currentTime, RntbdRequestManager requestManager, Channel channel) { final long writeDelayInNanos = Duration.between(timestamps.lastChannelWriteTime(), timestamps.lastChannelWriteAttemptTime()).toNanos(); final long writeHangDurationInNanos = Duration.between(timestamps.lastChannelWriteAttemptTime(), currentTime).toNanos(); String writeHangMessage = StringUtils.EMPTY; if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); writeHangMessage = MessageFormat.format( "{0} health check failed due to non-responding write: [lastChannelWriteAttemptTime: {1}, " + "lastChannelWriteTime: {2}, writeDelayInNanos: {3}, writeDelayLimitInNanos: {4}, " + "rntbdContext: {5}, pendingRequestCount: {6}]", channel, timestamps.lastChannelWriteAttemptTime(), timestamps.lastChannelWriteTime(), writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount); logger.warn(writeHangMessage); } return writeHangMessage; } private String isReadHang(Timestamps timestamps, Instant 
currentTime, RntbdRequestManager requestManager, Channel channel) { final long readDelay = Duration.between(timestamps.lastChannelReadTime(), timestamps.lastChannelWriteTime()).toNanos(); final long readHangDuration = Duration.between(timestamps.lastChannelWriteTime(), currentTime).toNanos(); String readHangMessage = StringUtils.EMPTY; if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); readHangMessage = MessageFormat.format( "{0} health check failed due to non-responding read: [lastChannelWrite: {1}, lastChannelRead: {2}, " + "readDelay: {3}, readDelayLimit: {4}, rntbdContext: {5}, pendingRequestCount: {6}]", channel, timestamps.lastChannelWriteTime(), timestamps.lastChannelReadTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount); logger.warn(readHangMessage); } return readHangMessage; } private String idleConnectionValidation(Timestamps timestamps, Instant currentTime, Channel channel) { String errorMessage = StringUtils.EMPTY; if (this.idleConnectionTimeoutInNanos > 0L) { if (Duration.between(currentTime, timestamps.lastChannelReadTime()).toNanos() > this.idleConnectionTimeoutInNanos) { errorMessage = MessageFormat.format( "{0} health check failed due to idle connection timeout: [lastChannelWrite: {1}, lastChannelRead: {2}, " + "idleConnectionTimeout: {3}, currentTime: {4}]", channel, timestamps.lastChannelWriteTime(), timestamps.lastChannelReadTime(), idleConnectionTimeoutInNanos, currentTime); logger.warn(errorMessage); } } return errorMessage; } @Override public String toString() { return RntbdObjectMapper.toString(this); } public static final class Timestamps { private static final AtomicReferenceFieldUpdater<Timestamps, Instant> lastPingUpdater = newUpdater(Timestamps.class, Instant.class, "lastPingTime"); private static final 
AtomicReferenceFieldUpdater<Timestamps, Instant>lastReadUpdater = newUpdater(Timestamps.class, Instant.class, "lastReadTime"); private static final AtomicReferenceFieldUpdater<Timestamps, Instant> lastWriteUpdater = newUpdater(Timestamps.class, Instant.class, "lastWriteTime"); private static final AtomicReferenceFieldUpdater<Timestamps, Instant> lastWriteAttemptUpdater = newUpdater(Timestamps.class, Instant.class, "lastWriteAttemptTime"); private static final AtomicIntegerFieldUpdater<Timestamps> transitTimeoutCountUpdater = AtomicIntegerFieldUpdater.newUpdater(Timestamps.class, "transitTimeoutCount"); private static final AtomicIntegerFieldUpdater<Timestamps> transitTimeoutWriteCountUpdater = AtomicIntegerFieldUpdater.newUpdater(Timestamps.class, "transitTimeoutWriteCount"); private static final AtomicReferenceFieldUpdater<Timestamps, Instant> transitTimeoutStartingTimeUpdater = newUpdater(Timestamps.class, Instant.class, "transitTimeoutStartingTime"); private volatile Instant lastPingTime; private volatile Instant lastReadTime; private volatile Instant lastWriteTime; private volatile Instant lastWriteAttemptTime; private volatile int transitTimeoutCount; private volatile int transitTimeoutWriteCount; private volatile Instant transitTimeoutStartingTime; public Timestamps() { lastPingUpdater.set(this, Instant.now()); lastReadUpdater.set(this, Instant.now()); lastWriteUpdater.set(this, Instant.now()); lastWriteAttemptUpdater.set(this, Instant.now()); } @SuppressWarnings("CopyConstructorMissesField") public Timestamps(Timestamps other) { checkNotNull(other, "other: null"); this.lastPingTime = lastPingUpdater.get(other); this.lastReadTime = lastReadUpdater.get(other); this.lastWriteTime = lastWriteUpdater.get(other); this.lastWriteAttemptTime = lastWriteAttemptUpdater.get(other); this.transitTimeoutCount = transitTimeoutCountUpdater.get(other); this.transitTimeoutWriteCount = transitTimeoutWriteCountUpdater.get(other); this.transitTimeoutStartingTime = 
transitTimeoutStartingTimeUpdater.get(other); } public void channelPingCompleted() { lastPingUpdater.set(this, Instant.now()); } public void channelReadCompleted() { lastReadUpdater.set(this, Instant.now()); } public void channelWriteAttempted() { lastWriteAttemptUpdater.set(this, Instant.now()); } public void channelWriteCompleted() { lastWriteUpdater.set(this, Instant.now()); } public void transitTimeout(boolean isReadOnly, Instant requestCreatedTime) { if (transitTimeoutCountUpdater.incrementAndGet(this) == 1) { transitTimeoutStartingTimeUpdater.set(this, requestCreatedTime); } if (!isReadOnly) { transitTimeoutWriteCountUpdater.incrementAndGet(this); } } public void resetTransitTimeout() { transitTimeoutCountUpdater.set(this, 0); transitTimeoutWriteCountUpdater.set(this, 0); transitTimeoutStartingTimeUpdater.set(this, null); } @JsonProperty public Instant lastChannelPingTime() { return lastPingUpdater.get(this); } @JsonProperty public Instant lastChannelReadTime() { return lastReadUpdater.get(this); } @JsonProperty public Instant lastChannelWriteTime() { return lastWriteUpdater.get(this); } @JsonProperty public Instant lastChannelWriteAttemptTime() { return lastWriteAttemptUpdater.get(this); } @JsonProperty public int transitTimeoutCount() { return transitTimeoutCountUpdater.get(this); } @JsonProperty public int tansitTimeoutWriteCount() { return transitTimeoutWriteCountUpdater.get(this); } @JsonProperty public Instant transitTimeoutStartingTime() { return transitTimeoutStartingTimeUpdater.get(this); } @Override public String toString() { return RntbdObjectMapper.toString(this); } } }
class RntbdClientChannelHealthChecker implements ChannelHealthChecker { private static final Logger logger = LoggerFactory.getLogger(RntbdClientChannelHealthChecker.class); private static final long recentReadWindowInNanos = 1_000_000_000L; private static final long readHangGracePeriodInNanos = (45L + 10L) * 1_000_000_000L; private static final long writeHangGracePeriodInNanos = 2L * 1_000_000_000L; @JsonProperty private final long idleConnectionTimeoutInNanos; @JsonProperty private final long readDelayLimitInNanos; @JsonProperty private final long writeDelayLimitInNanos; @JsonProperty private final long networkRequestTimeoutInNanos; @JsonProperty private final boolean timeoutDetectionEnabled; @JsonProperty private final double timeoutDetectionDisableCPUThreshold; @JsonProperty private final long timeoutTimeLimitInNanos; @JsonProperty private final int timeoutHighFrequencyThreshold; @JsonProperty private final long timeoutHighFrequencyTimeLimitInNanos; @JsonProperty private final int timeoutOnWriteThreshold; @JsonProperty private final long timeoutOnWriteTimeLimitInNanos; public RntbdClientChannelHealthChecker(final Config config) { checkNotNull(config, "expected non-null config"); checkArgument(config.receiveHangDetectionTimeInNanos() > readHangGracePeriodInNanos, "config.receiveHangDetectionTimeInNanos: %s", config.receiveHangDetectionTimeInNanos()); checkArgument(config.sendHangDetectionTimeInNanos() > writeHangGracePeriodInNanos, "config.sendHangDetectionTimeInNanos: %s", config.sendHangDetectionTimeInNanos()); this.idleConnectionTimeoutInNanos = config.idleConnectionTimeoutInNanos(); this.readDelayLimitInNanos = config.receiveHangDetectionTimeInNanos(); this.writeDelayLimitInNanos = config.sendHangDetectionTimeInNanos(); this.networkRequestTimeoutInNanos = config.tcpNetworkRequestTimeoutInNanos(); this.timeoutDetectionEnabled = config.timeoutDetectionEnabled(); this.timeoutDetectionDisableCPUThreshold = config.timeoutDetectionDisableCPUThreshold(); 
this.timeoutTimeLimitInNanos = config.timeoutDetectionTimeLimitInNanos(); this.timeoutHighFrequencyThreshold = config.timeoutDetectionHighFrequencyThreshold(); this.timeoutHighFrequencyTimeLimitInNanos = config.timeoutDetectionHighFrequencyTimeLimitInNanos(); this.timeoutOnWriteThreshold = config.timeoutDetectionOnWriteThreshold(); this.timeoutOnWriteTimeLimitInNanos = config.timeoutDetectionOnWriteTimeLimitInNanos(); } /** * Returns the idle connection timeout interval in nanoseconds. * <p> * A channel is considered idle if {@link * the last channel read is greater than {@link * * @return Idle connection timeout interval in nanoseconds. */ public long idleConnectionTimeoutInNanos() { return this.idleConnectionTimeoutInNanos; } /** * Returns the read delay limit in nanoseconds. * <p> * A channel will be declared unhealthy if the gap between the last channel write and the last channel read grows * beyond this value. * <p> * Constraint: {@link * * @return Read delay limit in nanoseconds. */ public long readDelayLimitInNanos() { return this.readDelayLimitInNanos; } /** * Returns the write delay limit in nanoseconds. * <p> * A channel will be declared unhealthy if the gap between the last channel write attempt and the last channel write * grows beyond this value. * <p> * Constraint: {@link * * @return Write delay limit in nanoseconds. */ public long writeDelayLimitInNanos() { return this.writeDelayLimitInNanos; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise. 
*/ public Future<Boolean> isHealthy(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final Promise<Boolean> promise = channel.eventLoop().newPromise(); this.isHealthyWithFailureReason(channel) .addListener((Future<String> future) -> { if (future.isSuccess()) { if (RntbdHealthCheckResults.SuccessValue.equals(future.get())) { promise.setSuccess(Boolean.TRUE); } else { promise.setSuccess(Boolean.FALSE); } } else { promise.setFailure(future.cause()); } }); return promise; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result reason {@link RntbdHealthCheckResults} if the channel is healthy, otherwise return the failed reason. */ public Future<String> isHealthyWithFailureReason(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class); final Promise<String> promise = channel.eventLoop().newPromise(); if (requestManager == null) { reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager"); return promise.setSuccess("active with no request manager"); } final Timestamps timestamps = requestManager.snapshotTimestamps(); final Instant currentTime = Instant.now(); if (Duration.between(timestamps.lastChannelReadTime(), currentTime).toNanos() < recentReadWindowInNanos) { return promise.setSuccess(RntbdHealthCheckResults.SuccessValue); } String writeIsHangMessage = this.isWriteHang(timestamps, currentTime, requestManager, channel); if (StringUtils.isNotEmpty(writeIsHangMessage)) { return promise.setSuccess(writeIsHangMessage); } String readIsHangMessage = this.isReadHang(timestamps, currentTime, requestManager, channel); if (StringUtils.isNotEmpty(readIsHangMessage)) { return promise.setSuccess(readIsHangMessage); } String transitTimeoutValidationMessage = this.transitTimeoutValidation(timestamps, currentTime, 
requestManager, channel); if (StringUtils.isNotEmpty(transitTimeoutValidationMessage)) { return promise.setSuccess(transitTimeoutValidationMessage); } String idleConnectionValidationMessage = this.idleConnectionValidation(timestamps, currentTime, channel); if(StringUtils.isNotEmpty(idleConnectionValidationMessage)) { return promise.setSuccess(idleConnectionValidationMessage); } channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> { if (completed.isSuccess()) { promise.setSuccess(RntbdHealthCheckResults.SuccessValue); } else { String msg = MessageFormat.format( "{0} health check request failed due to: {1}", channel, completed.cause().toString() ); logger.warn(msg); promise.setSuccess(msg); } }); return promise; } private String isWriteHang(Timestamps timestamps, Instant currentTime, RntbdRequestManager requestManager, Channel channel) { final long writeDelayInNanos = Duration.between(timestamps.lastChannelWriteTime(), timestamps.lastChannelWriteAttemptTime()).toNanos(); final long writeHangDurationInNanos = Duration.between(timestamps.lastChannelWriteAttemptTime(), currentTime).toNanos(); String writeHangMessage = StringUtils.EMPTY; if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); writeHangMessage = MessageFormat.format( "{0} health check failed due to non-responding write: [lastChannelWriteAttemptTime: {1}, " + "lastChannelWriteTime: {2}, writeDelayInNanos: {3}, writeDelayLimitInNanos: {4}, " + "rntbdContext: {5}, pendingRequestCount: {6}]", channel, timestamps.lastChannelWriteAttemptTime(), timestamps.lastChannelWriteTime(), writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount); logger.warn(writeHangMessage); } return writeHangMessage; } private String isReadHang(Timestamps timestamps, Instant 
currentTime, RntbdRequestManager requestManager, Channel channel) { final long readDelay = Duration.between(timestamps.lastChannelReadTime(), timestamps.lastChannelWriteTime()).toNanos(); final long readHangDuration = Duration.between(timestamps.lastChannelWriteTime(), currentTime).toNanos(); String readHangMessage = StringUtils.EMPTY; if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); readHangMessage = MessageFormat.format( "{0} health check failed due to non-responding read: [lastChannelWrite: {1}, lastChannelRead: {2}, " + "readDelay: {3}, readDelayLimit: {4}, rntbdContext: {5}, pendingRequestCount: {6}]", channel, timestamps.lastChannelWriteTime(), timestamps.lastChannelReadTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount); logger.warn(readHangMessage); } return readHangMessage; } private String idleConnectionValidation(Timestamps timestamps, Instant currentTime, Channel channel) { String errorMessage = StringUtils.EMPTY; if (this.idleConnectionTimeoutInNanos > 0L) { if (Duration.between(currentTime, timestamps.lastChannelReadTime()).toNanos() > this.idleConnectionTimeoutInNanos) { errorMessage = MessageFormat.format( "{0} health check failed due to idle connection timeout: [lastChannelWrite: {1}, lastChannelRead: {2}, " + "idleConnectionTimeout: {3}, currentTime: {4}]", channel, timestamps.lastChannelWriteTime(), timestamps.lastChannelReadTime(), idleConnectionTimeoutInNanos, currentTime); logger.warn(errorMessage); } } return errorMessage; } @Override public String toString() { return RntbdObjectMapper.toString(this); } public static final class Timestamps { private static final AtomicReferenceFieldUpdater<Timestamps, Instant> lastPingUpdater = newUpdater(Timestamps.class, Instant.class, "lastPingTime"); private static final 
AtomicReferenceFieldUpdater<Timestamps, Instant>lastReadUpdater = newUpdater(Timestamps.class, Instant.class, "lastReadTime"); private static final AtomicReferenceFieldUpdater<Timestamps, Instant> lastWriteUpdater = newUpdater(Timestamps.class, Instant.class, "lastWriteTime"); private static final AtomicReferenceFieldUpdater<Timestamps, Instant> lastWriteAttemptUpdater = newUpdater(Timestamps.class, Instant.class, "lastWriteAttemptTime"); private static final AtomicIntegerFieldUpdater<Timestamps> transitTimeoutCountUpdater = AtomicIntegerFieldUpdater.newUpdater(Timestamps.class, "transitTimeoutCount"); private static final AtomicIntegerFieldUpdater<Timestamps> transitTimeoutWriteCountUpdater = AtomicIntegerFieldUpdater.newUpdater(Timestamps.class, "transitTimeoutWriteCount"); private static final AtomicReferenceFieldUpdater<Timestamps, Instant> transitTimeoutStartingTimeUpdater = newUpdater(Timestamps.class, Instant.class, "transitTimeoutStartingTime"); private volatile Instant lastPingTime; private volatile Instant lastReadTime; private volatile Instant lastWriteTime; private volatile Instant lastWriteAttemptTime; private volatile int transitTimeoutCount; private volatile int transitTimeoutWriteCount; private volatile Instant transitTimeoutStartingTime; public Timestamps() { lastPingUpdater.set(this, Instant.now()); lastReadUpdater.set(this, Instant.now()); lastWriteUpdater.set(this, Instant.now()); lastWriteAttemptUpdater.set(this, Instant.now()); } @SuppressWarnings("CopyConstructorMissesField") public Timestamps(Timestamps other) { checkNotNull(other, "other: null"); this.lastPingTime = lastPingUpdater.get(other); this.lastReadTime = lastReadUpdater.get(other); this.lastWriteTime = lastWriteUpdater.get(other); this.lastWriteAttemptTime = lastWriteAttemptUpdater.get(other); this.transitTimeoutCount = transitTimeoutCountUpdater.get(other); this.transitTimeoutWriteCount = transitTimeoutWriteCountUpdater.get(other); this.transitTimeoutStartingTime = 
transitTimeoutStartingTimeUpdater.get(other); } public void channelPingCompleted() { lastPingUpdater.set(this, Instant.now()); } public void channelReadCompleted() { lastReadUpdater.set(this, Instant.now()); } public void channelWriteAttempted() { lastWriteAttemptUpdater.set(this, Instant.now()); } public void channelWriteCompleted() { lastWriteUpdater.set(this, Instant.now()); } public void transitTimeout(boolean isReadOnly, Instant requestCreatedTime) { if (transitTimeoutCountUpdater.incrementAndGet(this) == 1) { transitTimeoutStartingTimeUpdater.set(this, requestCreatedTime); } if (!isReadOnly) { transitTimeoutWriteCountUpdater.incrementAndGet(this); } } public void resetTransitTimeout() { transitTimeoutCountUpdater.set(this, 0); transitTimeoutWriteCountUpdater.set(this, 0); transitTimeoutStartingTimeUpdater.set(this, null); } @JsonProperty public Instant lastChannelPingTime() { return lastPingUpdater.get(this); } @JsonProperty public Instant lastChannelReadTime() { return lastReadUpdater.get(this); } @JsonProperty public Instant lastChannelWriteTime() { return lastWriteUpdater.get(this); } @JsonProperty public Instant lastChannelWriteAttemptTime() { return lastWriteAttemptUpdater.get(this); } @JsonProperty public int transitTimeoutCount() { return transitTimeoutCountUpdater.get(this); } @JsonProperty public int tansitTimeoutWriteCount() { return transitTimeoutWriteCountUpdater.get(this); } @JsonProperty public Instant transitTimeoutStartingTime() { return transitTimeoutStartingTimeUpdater.get(this); } @Override public String toString() { return RntbdObjectMapper.toString(this); } } }
Removed it from this PR as it the timeoutDetectionTimeLimit check should already cover part of this case as well.
private String transitTimeoutValidation(Timestamps timestamps, Instant currentTime, RntbdRequestManager requestManager, Channel channel) { String transitTimeoutValidationMessage = StringUtils.EMPTY; if (this.timeoutDetectionEnabled && timestamps.transitTimeoutCount() > 0) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); long readDelay = Duration.between(timestamps.lastChannelReadTime(), currentTime).toNanos(); if (readDelay >= this.timeoutTimeLimitInNanos) { transitTimeoutValidationMessage = MessageFormat.format( "{0} health check failed due to transit timeout detection time limit: [rntbdContext: {1}," + "lastChannelRead: {2}, timeoutTimeLimitInNanos: {3}]", channel, rntbdContext, timestamps.lastReadTime, this.timeoutTimeLimitInNanos); logger.warn(transitTimeoutValidationMessage); return transitTimeoutValidationMessage; } if (timestamps.transitTimeoutCount() >= this.timeoutHighFrequencyThreshold && readDelay >= this.timeoutHighFrequencyTimeLimitInNanos) { transitTimeoutValidationMessage = MessageFormat.format( "{0} health check failed due to transit timeout high frequency threshold hit: [rntbdContext: {1}," + "lastChannelRead: {2}, transitTimeoutCount: {3}, timeoutHighFrequencyThreshold: {4}, timeoutHighFrequencyTimeLimitInNanos: {5}]", channel, rntbdContext, timestamps.lastReadTime, timestamps.transitTimeoutCount, this.timeoutHighFrequencyThreshold, this.timeoutHighFrequencyTimeLimitInNanos); logger.warn(transitTimeoutValidationMessage); return transitTimeoutValidationMessage; } if (timestamps.tansitTimeoutWriteCount() >= this.timeoutOnWriteThreshold && readDelay >= this.timeoutOnWriteTimeLimitInNanos) { transitTimeoutValidationMessage = MessageFormat.format( "{0} health check failed due to transit timeout on write threshold hit: [rntbdContext: {1}," + "lastChannelRead: {2}, transitTimeoutWriteCount: {3}, timeoutOnWriteThreshold: {4}, timeoutOnWriteTimeLimitInNanos: {5}]", channel, rntbdContext, timestamps.lastReadTime, 
timestamps.transitTimeoutWriteCount, this.timeoutOnWriteThreshold, this.timeoutOnWriteTimeLimitInNanos); logger.warn(transitTimeoutValidationMessage); return transitTimeoutValidationMessage; } } return transitTimeoutValidationMessage; }
private String transitTimeoutValidation(Timestamps timestamps, Instant currentTime, RntbdRequestManager requestManager, Channel channel) { String transitTimeoutValidationMessage = StringUtils.EMPTY; if (this.timeoutDetectionEnabled && timestamps.transitTimeoutCount() > 0) { if (CpuMemoryMonitor.getCpuLoad().isCpuOverThreshold(this.timeoutDetectionDisableCPUThreshold)) { return transitTimeoutValidationMessage; } final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); long readDelay = Duration.between(timestamps.lastChannelReadTime(), currentTime).toNanos(); if (readDelay >= this.timeoutTimeLimitInNanos) { transitTimeoutValidationMessage = MessageFormat.format( "{0} health check failed due to transit timeout detection time limit: [rntbdContext: {1}," + "lastChannelRead: {2}, timeoutTimeLimitInNanos: {3}]", channel, rntbdContext, timestamps.lastReadTime, this.timeoutTimeLimitInNanos); logger.warn(transitTimeoutValidationMessage); return transitTimeoutValidationMessage; } if (timestamps.transitTimeoutCount() >= this.timeoutHighFrequencyThreshold && readDelay >= this.timeoutHighFrequencyTimeLimitInNanos) { transitTimeoutValidationMessage = MessageFormat.format( "{0} health check failed due to transit timeout high frequency threshold hit: [rntbdContext: {1}," + "lastChannelRead: {2}, transitTimeoutCount: {3}, timeoutHighFrequencyThreshold: {4}, timeoutHighFrequencyTimeLimitInNanos: {5}]", channel, rntbdContext, timestamps.lastReadTime, timestamps.transitTimeoutCount, this.timeoutHighFrequencyThreshold, this.timeoutHighFrequencyTimeLimitInNanos); logger.warn(transitTimeoutValidationMessage); return transitTimeoutValidationMessage; } if (timestamps.tansitTimeoutWriteCount() >= this.timeoutOnWriteThreshold && readDelay >= this.timeoutOnWriteTimeLimitInNanos) { transitTimeoutValidationMessage = MessageFormat.format( "{0} health check failed due to transit timeout on write threshold hit: [rntbdContext: {1}," + "lastChannelRead: {2}, 
transitTimeoutWriteCount: {3}, timeoutOnWriteThreshold: {4}, timeoutOnWriteTimeLimitInNanos: {5}]", channel, rntbdContext, timestamps.lastReadTime, timestamps.transitTimeoutWriteCount, this.timeoutOnWriteThreshold, this.timeoutOnWriteTimeLimitInNanos); logger.warn(transitTimeoutValidationMessage); return transitTimeoutValidationMessage; } } return transitTimeoutValidationMessage; }
class RntbdClientChannelHealthChecker implements ChannelHealthChecker { private static final Logger logger = LoggerFactory.getLogger(RntbdClientChannelHealthChecker.class); private static final long recentReadWindowInNanos = 1_000_000_000L; private static final long readHangGracePeriodInNanos = (45L + 10L) * 1_000_000_000L; private static final long writeHangGracePeriodInNanos = 2L * 1_000_000_000L; @JsonProperty private final long idleConnectionTimeoutInNanos; @JsonProperty private final long readDelayLimitInNanos; @JsonProperty private final long writeDelayLimitInNanos; @JsonProperty private final long networkRequestTimeoutInNanos; @JsonProperty private final boolean timeoutDetectionEnabled; @JsonProperty private final long timeoutTimeLimitInNanos; @JsonProperty private final int timeoutHighFrequencyThreshold; @JsonProperty private final long timeoutHighFrequencyTimeLimitInNanos; @JsonProperty private final int timeoutOnWriteThreshold; @JsonProperty private final long timeoutOnWriteTimeLimitInNanos; public RntbdClientChannelHealthChecker(final Config config) { checkNotNull(config, "expected non-null config"); checkArgument(config.receiveHangDetectionTimeInNanos() > readHangGracePeriodInNanos, "config.receiveHangDetectionTimeInNanos: %s", config.receiveHangDetectionTimeInNanos()); checkArgument(config.sendHangDetectionTimeInNanos() > writeHangGracePeriodInNanos, "config.sendHangDetectionTimeInNanos: %s", config.sendHangDetectionTimeInNanos()); this.idleConnectionTimeoutInNanos = config.idleConnectionTimeoutInNanos(); this.readDelayLimitInNanos = config.receiveHangDetectionTimeInNanos(); this.writeDelayLimitInNanos = config.sendHangDetectionTimeInNanos(); this.networkRequestTimeoutInNanos = config.tcpNetworkRequestTimeoutInNanos(); this.timeoutDetectionEnabled = config.timeoutDetectionEnabled(); this.timeoutTimeLimitInNanos = config.timeoutDetectionTimeLimitInNanos(); this.timeoutHighFrequencyThreshold = config.timeoutDetectionHighFrequencyThreshold(); 
this.timeoutHighFrequencyTimeLimitInNanos = config.timeoutDetectionHighFrequencyTimeLimitInNanos(); this.timeoutOnWriteThreshold = config.timeoutDetectionOnWriteThreshold(); this.timeoutOnWriteTimeLimitInNanos = config.timeoutDetectionOnWriteTimeLimitInNanos(); } /** * Returns the idle connection timeout interval in nanoseconds. * <p> * A channel is considered idle if {@link * the last channel read is greater than {@link * * @return Idle connection timeout interval in nanoseconds. */ public long idleConnectionTimeoutInNanos() { return this.idleConnectionTimeoutInNanos; } /** * Returns the read delay limit in nanoseconds. * <p> * A channel will be declared unhealthy if the gap between the last channel write and the last channel read grows * beyond this value. * <p> * Constraint: {@link * * @return Read delay limit in nanoseconds. */ public long readDelayLimitInNanos() { return this.readDelayLimitInNanos; } /** * Returns the write delay limit in nanoseconds. * <p> * A channel will be declared unhealthy if the gap between the last channel write attempt and the last channel write * grows beyond this value. * <p> * Constraint: {@link * * @return Write delay limit in nanoseconds. */ public long writeDelayLimitInNanos() { return this.writeDelayLimitInNanos; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise. 
*/ public Future<Boolean> isHealthy(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final Promise<Boolean> promise = channel.eventLoop().newPromise(); this.isHealthyWithFailureReason(channel) .addListener((Future<String> future) -> { if (future.isSuccess()) { if (RntbdHealthCheckResults.SuccessValue.equals(future.get())) { promise.setSuccess(Boolean.TRUE); } else { promise.setSuccess(Boolean.FALSE); } } else { promise.setFailure(future.cause()); } }); return promise; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result reason {@link RntbdHealthCheckResults} if the channel is healthy, otherwise return the failed reason. */ public Future<String> isHealthyWithFailureReason(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class); final Promise<String> promise = channel.eventLoop().newPromise(); if (requestManager == null) { reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager"); return promise.setSuccess("active with no request manager"); } final Timestamps timestamps = requestManager.snapshotTimestamps(); final Instant currentTime = Instant.now(); if (Duration.between(timestamps.lastChannelReadTime(), currentTime).toNanos() < recentReadWindowInNanos) { return promise.setSuccess(RntbdHealthCheckResults.SuccessValue); } String writeIsHangMessage = this.isWriteHang(timestamps, currentTime, requestManager, channel); if (StringUtils.isNotEmpty(writeIsHangMessage)) { return promise.setSuccess(writeIsHangMessage); } String readIsHangMessage = this.isReadHang(timestamps, currentTime, requestManager, channel); if (StringUtils.isNotEmpty(readIsHangMessage)) { return promise.setSuccess(readIsHangMessage); } String transitTimeoutValidationMessage = this.transitTimeoutValidation(timestamps, currentTime, 
requestManager, channel); if (StringUtils.isNotEmpty(transitTimeoutValidationMessage)) { return promise.setSuccess(transitTimeoutValidationMessage); } String idleConnectionValidationMessage = this.idleConnectionValidation(timestamps, currentTime, channel); if(StringUtils.isNotEmpty(idleConnectionValidationMessage)) { return promise.setSuccess(idleConnectionValidationMessage); } channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> { if (completed.isSuccess()) { promise.setSuccess(RntbdHealthCheckResults.SuccessValue); } else { String msg = MessageFormat.format( "{0} health check request failed due to: {1}", channel, completed.cause().toString() ); logger.warn(msg); promise.setSuccess(msg); } }); return promise; } private String isWriteHang(Timestamps timestamps, Instant currentTime, RntbdRequestManager requestManager, Channel channel) { final long writeDelayInNanos = Duration.between(timestamps.lastChannelWriteTime(), timestamps.lastChannelWriteAttemptTime()).toNanos(); final long writeHangDurationInNanos = Duration.between(timestamps.lastChannelWriteAttemptTime(), currentTime).toNanos(); String writeHangMessage = StringUtils.EMPTY; if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); writeHangMessage = MessageFormat.format( "{0} health check failed due to non-responding write: [lastChannelWriteAttemptTime: {1}, " + "lastChannelWriteTime: {2}, writeDelayInNanos: {3}, writeDelayLimitInNanos: {4}, " + "rntbdContext: {5}, pendingRequestCount: {6}]", channel, timestamps.lastChannelWriteAttemptTime(), timestamps.lastChannelWriteTime(), writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount); logger.warn(writeHangMessage); } return writeHangMessage; } private String isReadHang(Timestamps timestamps, Instant 
currentTime, RntbdRequestManager requestManager, Channel channel) { final long readDelay = Duration.between(timestamps.lastChannelReadTime(), timestamps.lastChannelWriteTime()).toNanos(); final long readHangDuration = Duration.between(timestamps.lastChannelWriteTime(), currentTime).toNanos(); String readHangMessage = StringUtils.EMPTY; if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); readHangMessage = MessageFormat.format( "{0} health check failed due to non-responding read: [lastChannelWrite: {1}, lastChannelRead: {2}, " + "readDelay: {3}, readDelayLimit: {4}, rntbdContext: {5}, pendingRequestCount: {6}]", channel, timestamps.lastChannelWriteTime(), timestamps.lastChannelReadTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount); logger.warn(readHangMessage); } return readHangMessage; } private String idleConnectionValidation(Timestamps timestamps, Instant currentTime, Channel channel) { String errorMessage = StringUtils.EMPTY; if (this.idleConnectionTimeoutInNanos > 0L) { if (Duration.between(currentTime, timestamps.lastChannelReadTime()).toNanos() > this.idleConnectionTimeoutInNanos) { errorMessage = MessageFormat.format( "{0} health check failed due to idle connection timeout: [lastChannelWrite: {1}, lastChannelRead: {2}, " + "idleConnectionTimeout: {3}, currentTime: {4}]", channel, timestamps.lastChannelWriteTime(), timestamps.lastChannelReadTime(), idleConnectionTimeoutInNanos, currentTime); logger.warn(errorMessage); } } return errorMessage; } @Override public String toString() { return RntbdObjectMapper.toString(this); } public static final class Timestamps { private static final AtomicReferenceFieldUpdater<Timestamps, Instant> lastPingUpdater = newUpdater(Timestamps.class, Instant.class, "lastPingTime"); private static final 
AtomicReferenceFieldUpdater<Timestamps, Instant>lastReadUpdater = newUpdater(Timestamps.class, Instant.class, "lastReadTime"); private static final AtomicReferenceFieldUpdater<Timestamps, Instant> lastWriteUpdater = newUpdater(Timestamps.class, Instant.class, "lastWriteTime"); private static final AtomicReferenceFieldUpdater<Timestamps, Instant> lastWriteAttemptUpdater = newUpdater(Timestamps.class, Instant.class, "lastWriteAttemptTime"); private static final AtomicIntegerFieldUpdater<Timestamps> transitTimeoutCountUpdater = AtomicIntegerFieldUpdater.newUpdater(Timestamps.class, "transitTimeoutCount"); private static final AtomicIntegerFieldUpdater<Timestamps> transitTimeoutWriteCountUpdater = AtomicIntegerFieldUpdater.newUpdater(Timestamps.class, "transitTimeoutWriteCount"); private static final AtomicReferenceFieldUpdater<Timestamps, Instant> transitTimeoutStartingTimeUpdater = newUpdater(Timestamps.class, Instant.class, "transitTimeoutStartingTime"); private volatile Instant lastPingTime; private volatile Instant lastReadTime; private volatile Instant lastWriteTime; private volatile Instant lastWriteAttemptTime; private volatile int transitTimeoutCount; private volatile int transitTimeoutWriteCount; private volatile Instant transitTimeoutStartingTime; public Timestamps() { lastPingUpdater.set(this, Instant.now()); lastReadUpdater.set(this, Instant.now()); lastWriteUpdater.set(this, Instant.now()); lastWriteAttemptUpdater.set(this, Instant.now()); } @SuppressWarnings("CopyConstructorMissesField") public Timestamps(Timestamps other) { checkNotNull(other, "other: null"); this.lastPingTime = lastPingUpdater.get(other); this.lastReadTime = lastReadUpdater.get(other); this.lastWriteTime = lastWriteUpdater.get(other); this.lastWriteAttemptTime = lastWriteAttemptUpdater.get(other); this.transitTimeoutCount = transitTimeoutCountUpdater.get(other); this.transitTimeoutWriteCount = transitTimeoutWriteCountUpdater.get(other); this.transitTimeoutStartingTime = 
transitTimeoutStartingTimeUpdater.get(other); } public void channelPingCompleted() { lastPingUpdater.set(this, Instant.now()); } public void channelReadCompleted() { lastReadUpdater.set(this, Instant.now()); } public void channelWriteAttempted() { lastWriteAttemptUpdater.set(this, Instant.now()); } public void channelWriteCompleted() { lastWriteUpdater.set(this, Instant.now()); } public void transitTimeout(boolean isReadOnly, Instant requestCreatedTime) { if (transitTimeoutCountUpdater.incrementAndGet(this) == 1) { transitTimeoutStartingTimeUpdater.set(this, requestCreatedTime); } if (!isReadOnly) { transitTimeoutWriteCountUpdater.incrementAndGet(this); } } public void resetTransitTimeout() { transitTimeoutCountUpdater.set(this, 0); transitTimeoutWriteCountUpdater.set(this, 0); transitTimeoutStartingTimeUpdater.set(this, null); } @JsonProperty public Instant lastChannelPingTime() { return lastPingUpdater.get(this); } @JsonProperty public Instant lastChannelReadTime() { return lastReadUpdater.get(this); } @JsonProperty public Instant lastChannelWriteTime() { return lastWriteUpdater.get(this); } @JsonProperty public Instant lastChannelWriteAttemptTime() { return lastWriteAttemptUpdater.get(this); } @JsonProperty public int transitTimeoutCount() { return transitTimeoutCountUpdater.get(this); } @JsonProperty public int tansitTimeoutWriteCount() { return transitTimeoutWriteCountUpdater.get(this); } @JsonProperty public Instant transitTimeoutStartingTime() { return transitTimeoutStartingTimeUpdater.get(this); } @Override public String toString() { return RntbdObjectMapper.toString(this); } } }
class RntbdClientChannelHealthChecker implements ChannelHealthChecker { private static final Logger logger = LoggerFactory.getLogger(RntbdClientChannelHealthChecker.class); private static final long recentReadWindowInNanos = 1_000_000_000L; private static final long readHangGracePeriodInNanos = (45L + 10L) * 1_000_000_000L; private static final long writeHangGracePeriodInNanos = 2L * 1_000_000_000L; @JsonProperty private final long idleConnectionTimeoutInNanos; @JsonProperty private final long readDelayLimitInNanos; @JsonProperty private final long writeDelayLimitInNanos; @JsonProperty private final long networkRequestTimeoutInNanos; @JsonProperty private final boolean timeoutDetectionEnabled; @JsonProperty private final double timeoutDetectionDisableCPUThreshold; @JsonProperty private final long timeoutTimeLimitInNanos; @JsonProperty private final int timeoutHighFrequencyThreshold; @JsonProperty private final long timeoutHighFrequencyTimeLimitInNanos; @JsonProperty private final int timeoutOnWriteThreshold; @JsonProperty private final long timeoutOnWriteTimeLimitInNanos; public RntbdClientChannelHealthChecker(final Config config) { checkNotNull(config, "expected non-null config"); checkArgument(config.receiveHangDetectionTimeInNanos() > readHangGracePeriodInNanos, "config.receiveHangDetectionTimeInNanos: %s", config.receiveHangDetectionTimeInNanos()); checkArgument(config.sendHangDetectionTimeInNanos() > writeHangGracePeriodInNanos, "config.sendHangDetectionTimeInNanos: %s", config.sendHangDetectionTimeInNanos()); this.idleConnectionTimeoutInNanos = config.idleConnectionTimeoutInNanos(); this.readDelayLimitInNanos = config.receiveHangDetectionTimeInNanos(); this.writeDelayLimitInNanos = config.sendHangDetectionTimeInNanos(); this.networkRequestTimeoutInNanos = config.tcpNetworkRequestTimeoutInNanos(); this.timeoutDetectionEnabled = config.timeoutDetectionEnabled(); this.timeoutDetectionDisableCPUThreshold = config.timeoutDetectionDisableCPUThreshold(); 
this.timeoutTimeLimitInNanos = config.timeoutDetectionTimeLimitInNanos(); this.timeoutHighFrequencyThreshold = config.timeoutDetectionHighFrequencyThreshold(); this.timeoutHighFrequencyTimeLimitInNanos = config.timeoutDetectionHighFrequencyTimeLimitInNanos(); this.timeoutOnWriteThreshold = config.timeoutDetectionOnWriteThreshold(); this.timeoutOnWriteTimeLimitInNanos = config.timeoutDetectionOnWriteTimeLimitInNanos(); } /** * Returns the idle connection timeout interval in nanoseconds. * <p> * A channel is considered idle if {@link * the last channel read is greater than {@link * * @return Idle connection timeout interval in nanoseconds. */ public long idleConnectionTimeoutInNanos() { return this.idleConnectionTimeoutInNanos; } /** * Returns the read delay limit in nanoseconds. * <p> * A channel will be declared unhealthy if the gap between the last channel write and the last channel read grows * beyond this value. * <p> * Constraint: {@link * * @return Read delay limit in nanoseconds. */ public long readDelayLimitInNanos() { return this.readDelayLimitInNanos; } /** * Returns the write delay limit in nanoseconds. * <p> * A channel will be declared unhealthy if the gap between the last channel write attempt and the last channel write * grows beyond this value. * <p> * Constraint: {@link * * @return Write delay limit in nanoseconds. */ public long writeDelayLimitInNanos() { return this.writeDelayLimitInNanos; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise. 
*/ public Future<Boolean> isHealthy(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final Promise<Boolean> promise = channel.eventLoop().newPromise(); this.isHealthyWithFailureReason(channel) .addListener((Future<String> future) -> { if (future.isSuccess()) { if (RntbdHealthCheckResults.SuccessValue.equals(future.get())) { promise.setSuccess(Boolean.TRUE); } else { promise.setSuccess(Boolean.FALSE); } } else { promise.setFailure(future.cause()); } }); return promise; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result reason {@link RntbdHealthCheckResults} if the channel is healthy, otherwise return the failed reason. */ public Future<String> isHealthyWithFailureReason(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class); final Promise<String> promise = channel.eventLoop().newPromise(); if (requestManager == null) { reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager"); return promise.setSuccess("active with no request manager"); } final Timestamps timestamps = requestManager.snapshotTimestamps(); final Instant currentTime = Instant.now(); if (Duration.between(timestamps.lastChannelReadTime(), currentTime).toNanos() < recentReadWindowInNanos) { return promise.setSuccess(RntbdHealthCheckResults.SuccessValue); } String writeIsHangMessage = this.isWriteHang(timestamps, currentTime, requestManager, channel); if (StringUtils.isNotEmpty(writeIsHangMessage)) { return promise.setSuccess(writeIsHangMessage); } String readIsHangMessage = this.isReadHang(timestamps, currentTime, requestManager, channel); if (StringUtils.isNotEmpty(readIsHangMessage)) { return promise.setSuccess(readIsHangMessage); } String transitTimeoutValidationMessage = this.transitTimeoutValidation(timestamps, currentTime, 
requestManager, channel); if (StringUtils.isNotEmpty(transitTimeoutValidationMessage)) { return promise.setSuccess(transitTimeoutValidationMessage); } String idleConnectionValidationMessage = this.idleConnectionValidation(timestamps, currentTime, channel); if(StringUtils.isNotEmpty(idleConnectionValidationMessage)) { return promise.setSuccess(idleConnectionValidationMessage); } channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> { if (completed.isSuccess()) { promise.setSuccess(RntbdHealthCheckResults.SuccessValue); } else { String msg = MessageFormat.format( "{0} health check request failed due to: {1}", channel, completed.cause().toString() ); logger.warn(msg); promise.setSuccess(msg); } }); return promise; } private String isWriteHang(Timestamps timestamps, Instant currentTime, RntbdRequestManager requestManager, Channel channel) { final long writeDelayInNanos = Duration.between(timestamps.lastChannelWriteTime(), timestamps.lastChannelWriteAttemptTime()).toNanos(); final long writeHangDurationInNanos = Duration.between(timestamps.lastChannelWriteAttemptTime(), currentTime).toNanos(); String writeHangMessage = StringUtils.EMPTY; if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); writeHangMessage = MessageFormat.format( "{0} health check failed due to non-responding write: [lastChannelWriteAttemptTime: {1}, " + "lastChannelWriteTime: {2}, writeDelayInNanos: {3}, writeDelayLimitInNanos: {4}, " + "rntbdContext: {5}, pendingRequestCount: {6}]", channel, timestamps.lastChannelWriteAttemptTime(), timestamps.lastChannelWriteTime(), writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount); logger.warn(writeHangMessage); } return writeHangMessage; } private String isReadHang(Timestamps timestamps, Instant 
currentTime, RntbdRequestManager requestManager, Channel channel) { final long readDelay = Duration.between(timestamps.lastChannelReadTime(), timestamps.lastChannelWriteTime()).toNanos(); final long readHangDuration = Duration.between(timestamps.lastChannelWriteTime(), currentTime).toNanos(); String readHangMessage = StringUtils.EMPTY; if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); readHangMessage = MessageFormat.format( "{0} health check failed due to non-responding read: [lastChannelWrite: {1}, lastChannelRead: {2}, " + "readDelay: {3}, readDelayLimit: {4}, rntbdContext: {5}, pendingRequestCount: {6}]", channel, timestamps.lastChannelWriteTime(), timestamps.lastChannelReadTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount); logger.warn(readHangMessage); } return readHangMessage; } private String idleConnectionValidation(Timestamps timestamps, Instant currentTime, Channel channel) { String errorMessage = StringUtils.EMPTY; if (this.idleConnectionTimeoutInNanos > 0L) { if (Duration.between(currentTime, timestamps.lastChannelReadTime()).toNanos() > this.idleConnectionTimeoutInNanos) { errorMessage = MessageFormat.format( "{0} health check failed due to idle connection timeout: [lastChannelWrite: {1}, lastChannelRead: {2}, " + "idleConnectionTimeout: {3}, currentTime: {4}]", channel, timestamps.lastChannelWriteTime(), timestamps.lastChannelReadTime(), idleConnectionTimeoutInNanos, currentTime); logger.warn(errorMessage); } } return errorMessage; } @Override public String toString() { return RntbdObjectMapper.toString(this); } public static final class Timestamps { private static final AtomicReferenceFieldUpdater<Timestamps, Instant> lastPingUpdater = newUpdater(Timestamps.class, Instant.class, "lastPingTime"); private static final 
AtomicReferenceFieldUpdater<Timestamps, Instant>lastReadUpdater = newUpdater(Timestamps.class, Instant.class, "lastReadTime"); private static final AtomicReferenceFieldUpdater<Timestamps, Instant> lastWriteUpdater = newUpdater(Timestamps.class, Instant.class, "lastWriteTime"); private static final AtomicReferenceFieldUpdater<Timestamps, Instant> lastWriteAttemptUpdater = newUpdater(Timestamps.class, Instant.class, "lastWriteAttemptTime"); private static final AtomicIntegerFieldUpdater<Timestamps> transitTimeoutCountUpdater = AtomicIntegerFieldUpdater.newUpdater(Timestamps.class, "transitTimeoutCount"); private static final AtomicIntegerFieldUpdater<Timestamps> transitTimeoutWriteCountUpdater = AtomicIntegerFieldUpdater.newUpdater(Timestamps.class, "transitTimeoutWriteCount"); private static final AtomicReferenceFieldUpdater<Timestamps, Instant> transitTimeoutStartingTimeUpdater = newUpdater(Timestamps.class, Instant.class, "transitTimeoutStartingTime"); private volatile Instant lastPingTime; private volatile Instant lastReadTime; private volatile Instant lastWriteTime; private volatile Instant lastWriteAttemptTime; private volatile int transitTimeoutCount; private volatile int transitTimeoutWriteCount; private volatile Instant transitTimeoutStartingTime; public Timestamps() { lastPingUpdater.set(this, Instant.now()); lastReadUpdater.set(this, Instant.now()); lastWriteUpdater.set(this, Instant.now()); lastWriteAttemptUpdater.set(this, Instant.now()); } @SuppressWarnings("CopyConstructorMissesField") public Timestamps(Timestamps other) { checkNotNull(other, "other: null"); this.lastPingTime = lastPingUpdater.get(other); this.lastReadTime = lastReadUpdater.get(other); this.lastWriteTime = lastWriteUpdater.get(other); this.lastWriteAttemptTime = lastWriteAttemptUpdater.get(other); this.transitTimeoutCount = transitTimeoutCountUpdater.get(other); this.transitTimeoutWriteCount = transitTimeoutWriteCountUpdater.get(other); this.transitTimeoutStartingTime = 
transitTimeoutStartingTimeUpdater.get(other); } public void channelPingCompleted() { lastPingUpdater.set(this, Instant.now()); } public void channelReadCompleted() { lastReadUpdater.set(this, Instant.now()); } public void channelWriteAttempted() { lastWriteAttemptUpdater.set(this, Instant.now()); } public void channelWriteCompleted() { lastWriteUpdater.set(this, Instant.now()); } public void transitTimeout(boolean isReadOnly, Instant requestCreatedTime) { if (transitTimeoutCountUpdater.incrementAndGet(this) == 1) { transitTimeoutStartingTimeUpdater.set(this, requestCreatedTime); } if (!isReadOnly) { transitTimeoutWriteCountUpdater.incrementAndGet(this); } } public void resetTransitTimeout() { transitTimeoutCountUpdater.set(this, 0); transitTimeoutWriteCountUpdater.set(this, 0); transitTimeoutStartingTimeUpdater.set(this, null); } @JsonProperty public Instant lastChannelPingTime() { return lastPingUpdater.get(this); } @JsonProperty public Instant lastChannelReadTime() { return lastReadUpdater.get(this); } @JsonProperty public Instant lastChannelWriteTime() { return lastWriteUpdater.get(this); } @JsonProperty public Instant lastChannelWriteAttemptTime() { return lastWriteAttemptUpdater.get(this); } @JsonProperty public int transitTimeoutCount() { return transitTimeoutCountUpdater.get(this); } @JsonProperty public int tansitTimeoutWriteCount() { return transitTimeoutWriteCountUpdater.get(this); } @JsonProperty public Instant transitTimeoutStartingTime() { return transitTimeoutStartingTimeUpdater.get(this); } @Override public String toString() { return RntbdObjectMapper.toString(this); } } }
thanks for fixing these tests
public void faultInjectionServerErrorRuleTests_ServerResponseDelay() throws JsonProcessingException { CosmosAsyncClient newClient = null; String timeoutRuleId = "serverErrorRule-transitTimeout-" + UUID.randomUUID(); FaultInjectionRule timeoutRule = new FaultInjectionRuleBuilder(timeoutRuleId) .condition( new FaultInjectionConditionBuilder() .operationType(FaultInjectionOperationType.READ_ITEM) .build() ) .result( FaultInjectionResultBuilders .getResultBuilder(FaultInjectionServerErrorType.RESPONSE_DELAY) .times(1) .delay(Duration.ofSeconds(6)) .build() ) .duration(Duration.ofMinutes(5)) .build(); try { DirectConnectionConfig directConnectionConfig = DirectConnectionConfig.getDefaultConfig(); directConnectionConfig.setConnectTimeout(Duration.ofSeconds(1)); newClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .consistencyLevel(BridgeInternal.getContextClient(this.client).getConsistencyLevel()) .directMode(directConnectionConfig) .buildAsyncClient(); CosmosAsyncContainer container = newClient .getDatabase(cosmosAsyncContainer.getDatabase().getId()) .getContainer(cosmosAsyncContainer.getId()); TestItem createdItem = TestItem.createNewItem(); container.createItem(createdItem).block(); CosmosFaultInjectionHelper.configureFaultInjectionRules(container, Arrays.asList(timeoutRule)).block(); CosmosItemResponse<TestItem> itemResponse = container.readItem(createdItem.getId(), new PartitionKey(createdItem.getId()), TestItem.class).block(); assertThat(timeoutRule.getHitCount()).isEqualTo(1); this.validateFaultInjectionRuleApplied( itemResponse.getDiagnostics(), OperationType.Read, HttpConstants.StatusCodes.GONE, HttpConstants.SubStatusCodes.TRANSPORT_GENERATED_410, timeoutRuleId, true ); } finally { timeoutRule.disable(); safeClose(newClient); } }
HttpConstants.SubStatusCodes.TRANSPORT_GENERATED_410,
public void faultInjectionServerErrorRuleTests_ServerResponseDelay() throws JsonProcessingException { CosmosAsyncClient newClient = null; String timeoutRuleId = "serverErrorRule-transitTimeout-" + UUID.randomUUID(); FaultInjectionRule timeoutRule = new FaultInjectionRuleBuilder(timeoutRuleId) .condition( new FaultInjectionConditionBuilder() .operationType(FaultInjectionOperationType.READ_ITEM) .build() ) .result( FaultInjectionResultBuilders .getResultBuilder(FaultInjectionServerErrorType.RESPONSE_DELAY) .times(1) .delay(Duration.ofSeconds(6)) .build() ) .duration(Duration.ofMinutes(5)) .build(); try { DirectConnectionConfig directConnectionConfig = DirectConnectionConfig.getDefaultConfig(); directConnectionConfig.setConnectTimeout(Duration.ofSeconds(1)); newClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .consistencyLevel(BridgeInternal.getContextClient(this.client).getConsistencyLevel()) .directMode(directConnectionConfig) .buildAsyncClient(); CosmosAsyncContainer container = newClient .getDatabase(cosmosAsyncContainer.getDatabase().getId()) .getContainer(cosmosAsyncContainer.getId()); TestItem createdItem = TestItem.createNewItem(); container.createItem(createdItem).block(); CosmosFaultInjectionHelper.configureFaultInjectionRules(container, Arrays.asList(timeoutRule)).block(); CosmosItemResponse<TestItem> itemResponse = container.readItem(createdItem.getId(), new PartitionKey(createdItem.getId()), TestItem.class).block(); assertThat(timeoutRule.getHitCount()).isEqualTo(1); this.validateHitCount(timeoutRule, 1, OperationType.Read, ResourceType.Document); this.validateFaultInjectionRuleApplied( itemResponse.getDiagnostics(), OperationType.Read, HttpConstants.StatusCodes.GONE, HttpConstants.SubStatusCodes.TRANSPORT_GENERATED_410, timeoutRuleId, true ); } finally { timeoutRule.disable(); safeClose(newClient); } }
/**
 * End-to-end tests for server-error fault-injection rules against a Cosmos DB account:
 * rules scoped by operation type, region, feed range (partition), replica addresses,
 * hit limits, injected response/connection delays, and injected server error responses.
 *
 * <p>Fixes in this revision:
 * <ul>
 *   <li>Removed a duplicated {@code @Test} annotation on
 *       {@code faultInjectionServerErrorRuleTests_ServerConnectionDelay} ({@code @Test}
 *       is not repeatable — the duplicate was a compile error).</li>
 *   <li>{@code clientWithPreferredRegions} is now initialized to {@code null} and closed
 *       via {@code safeClose} in the {@code finally} block (previously leaked).</li>
 *   <li>{@code isBetween(1l, 2l)} now uses uppercase {@code L} literal suffixes.</li>
 * </ul>
 */
class FaultInjectionServerErrorRuleTests extends TestSuiteBase {
    private static final int TIMEOUT = 60000;

    private CosmosAsyncClient client;
    private CosmosAsyncContainer cosmosAsyncContainer;
    private DatabaseAccount databaseAccount;
    // Region name -> region endpoint, for readable / writable locations respectively.
    private Map<String, String> readRegionMap;
    private Map<String, String> writeRegionMap;

    @Factory(dataProvider = "simpleClientBuildersWithJustDirectTcp")
    public FaultInjectionServerErrorRuleTests(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
        this.subscriberValidationTimeout = TIMEOUT;
    }

    /** Creates the shared client/container and caches the account's region maps. */
    @BeforeClass(groups = {"multi-region", "simple"}, timeOut = TIMEOUT)
    public void beforeClass() {
        client = getClientBuilder().buildAsyncClient();
        AsyncDocumentClient asyncDocumentClient = BridgeInternal.getContextClient(client);
        GlobalEndpointManager globalEndpointManager = asyncDocumentClient.getGlobalEndpointManager();
        DatabaseAccount databaseAccount = globalEndpointManager.getLatestDatabaseAccount();
        this.databaseAccount = databaseAccount;
        this.cosmosAsyncContainer = getSharedMultiPartitionCosmosContainerWithIdAsPartitionKey(client);
        this.readRegionMap = this.getRegionMap(databaseAccount, false);
        this.writeRegionMap = this.getRegionMap(databaseAccount, true);
    }

    @DataProvider(name = "operationTypeProvider")
    public static Object[][] operationTypeProvider() {
        return new Object[][]{
            { OperationType.Read },
            { OperationType.Replace },
            { OperationType.Create },
            { OperationType.Delete },
            { OperationType.Query },
            { OperationType.Patch }
        };
    }

    // Second column: whether the operation targets primary addresses only.
    @DataProvider(name = "faultInjectionOperationTypeProvider")
    public static Object[][] faultInjectionOperationTypeProvider() {
        return new Object[][]{
            { FaultInjectionOperationType.READ_ITEM, false },
            { FaultInjectionOperationType.REPLACE_ITEM, true },
            { FaultInjectionOperationType.CREATE_ITEM, true },
            { FaultInjectionOperationType.DELETE_ITEM, true},
            { FaultInjectionOperationType.QUERY_ITEM, false },
            { FaultInjectionOperationType.PATCH_ITEM, true }
        };
    }

    // Columns: error type, canRetry, expected status code, expected sub-status code.
    @DataProvider(name = "faultInjectionServerErrorResponseProvider")
    public static Object[][] faultInjectionServerErrorResponseProvider() {
        return new Object[][]{
            { FaultInjectionServerErrorType.INTERNAL_SERVER_ERROR, false, 500, 0 },
            { FaultInjectionServerErrorType.RETRY_WITH, true, 449, 0 },
            { FaultInjectionServerErrorType.TOO_MANY_REQUEST, true, 429, 0 },
            { FaultInjectionServerErrorType.READ_SESSION_NOT_AVAILABLE, true, 404, 1002 },
            { FaultInjectionServerErrorType.TIMEOUT, false, 408, 0 },
            { FaultInjectionServerErrorType.PARTITION_IS_MIGRATING, true, 410, 1008 },
            { FaultInjectionServerErrorType.PARTITION_IS_SPLITTING, true, 410, 1007 }
        };
    }

    /**
     * Rules conditioned on READ_ITEM must only fire for reads: a GONE rule applies to the
     * read path, and after swapping in a TOO_MANY_REQUEST rule only Read operations see 429.
     */
    @Test(groups = {"multi-region", "simple"}, dataProvider = "operationTypeProvider", timeOut = TIMEOUT)
    public void faultInjectionServerErrorRuleTests_OperationType(OperationType operationType) throws JsonProcessingException {
        String serverGoneRuleId = "serverErrorRule-serverGone-" + UUID.randomUUID();
        FaultInjectionRule serverGoneErrorRule =
            new FaultInjectionRuleBuilder(serverGoneRuleId)
                .condition(
                    new FaultInjectionConditionBuilder()
                        .operationType(FaultInjectionOperationType.READ_ITEM)
                        .build()
                )
                .result(
                    FaultInjectionResultBuilders
                        .getResultBuilder(FaultInjectionServerErrorType.GONE)
                        .times(1)
                        .build()
                )
                .duration(Duration.ofMinutes(5))
                .build();

        String tooManyRequestsRuleId = "serverErrorRule-tooManyRequests-" + UUID.randomUUID();
        FaultInjectionRule serverTooManyRequestsErrorRule =
            new FaultInjectionRuleBuilder(tooManyRequestsRuleId)
                .condition(
                    new FaultInjectionConditionBuilder()
                        .operationType(FaultInjectionOperationType.READ_ITEM)
                        .build()
                )
                .result(
                    FaultInjectionResultBuilders
                        .getResultBuilder(FaultInjectionServerErrorType.TOO_MANY_REQUEST)
                        .times(1)
                        .build()
                )
                .duration(Duration.ofMinutes(5))
                .build();

        try {
            TestItem createdItem = TestItem.createNewItem();
            cosmosAsyncContainer.createItem(createdItem).block();

            CosmosFaultInjectionHelper.configureFaultInjectionRules(cosmosAsyncContainer, Arrays.asList(serverGoneErrorRule)).block();
            assertThat(serverGoneErrorRule.getAddresses().size()).isZero();
            // NOTE(review): assertThat(boolean) without .isTrue() asserts nothing; left as-is
            // because the expected condition (readRegionMap.size() + 1) needs confirmation
            // before enabling the assertion.
            assertThat(serverGoneErrorRule.getRegionEndpoints().size() == this.readRegionMap.size() + 1
                && serverGoneErrorRule.getRegionEndpoints().containsAll(this.readRegionMap.values()));

            CosmosDiagnostics cosmosDiagnostics = this.performDocumentOperation(cosmosAsyncContainer, operationType, createdItem);
            this.validateFaultInjectionRuleApplied(
                cosmosDiagnostics,
                operationType,
                HttpConstants.StatusCodes.GONE,
                HttpConstants.SubStatusCodes.UNKNOWN,
                serverGoneRuleId,
                true);

            serverGoneErrorRule.disable();

            CosmosFaultInjectionHelper.configureFaultInjectionRules(cosmosAsyncContainer, Arrays.asList(serverTooManyRequestsErrorRule)).block();
            // NOTE(review): these re-check serverGoneErrorRule after disabling it — possibly
            // intended to check serverTooManyRequestsErrorRule instead; confirm before changing.
            assertThat(serverGoneErrorRule.getAddresses().size()).isZero();
            assertThat(serverGoneErrorRule.getRegionEndpoints().size() == this.readRegionMap.size() + 1
                && serverGoneErrorRule.getRegionEndpoints().containsAll(this.readRegionMap.values()));

            cosmosDiagnostics = this.performDocumentOperation(cosmosAsyncContainer, operationType, createdItem);
            if (operationType == OperationType.Read) {
                assertThat(serverTooManyRequestsErrorRule.getHitCount()).isEqualTo(1);
                this.validateFaultInjectionRuleApplied(
                    cosmosDiagnostics,
                    operationType,
                    HttpConstants.StatusCodes.TOO_MANY_REQUESTS,
                    HttpConstants.SubStatusCodes.UNKNOWN,
                    tooManyRequestsRuleId,
                    true);
            } else {
                this.validateNoFaultInjectionApplied(cosmosDiagnostics, operationType);
            }
        } finally {
            serverGoneErrorRule.disable();
            serverTooManyRequestsErrorRule.disable();
        }
    }

    /**
     * Rules scoped to write-region endpoints or to a logical partition's primary replicas
     * must only impact the matching addresses/operations.
     */
    @Test(groups = {"multi-region"}, dataProvider = "operationTypeProvider", timeOut = TIMEOUT)
    public void faultInjectionServerErrorRuleTests_OperationTypeImpactAddresses(OperationType operationType) throws JsonProcessingException {
        TestItem createdItem = TestItem.createNewItem();
        this.cosmosAsyncContainer.createItem(createdItem).block();

        String writeRegionServerGoneRuleId = "serverErrorRule-writeRegionOnly-" + UUID.randomUUID();
        FaultInjectionRule writeRegionServerGoneErrorRule =
            new FaultInjectionRuleBuilder(writeRegionServerGoneRuleId)
                .condition(
                    new FaultInjectionConditionBuilder()
                        .operationType(FaultInjectionOperationType.CREATE_ITEM)
                        .build()
                )
                .result(
                    FaultInjectionResultBuilders
                        .getResultBuilder(FaultInjectionServerErrorType.GONE)
                        .times(1)
                        .build()
                )
                .duration(Duration.ofMinutes(5))
                .build();

        String primaryReplicaServerGoneRuleId = "serverErrorRule-primaryReplicaOnly-" + UUID.randomUUID();
        FaultInjectionRule primaryReplicaServerGoneErrorRule =
            new FaultInjectionRuleBuilder(primaryReplicaServerGoneRuleId)
                .condition(
                    new FaultInjectionConditionBuilder()
                        .operationType(FaultInjectionOperationType.CREATE_ITEM)
                        .endpoints(
                            new FaultInjectionEndpointBuilder(FeedRange.forLogicalPartition(new PartitionKey(createdItem.getId())))
                                .replicaCount(3)
                                .build())
                        .build()
                )
                .result(
                    FaultInjectionResultBuilders
                        .getResultBuilder(FaultInjectionServerErrorType.GONE)
                        .times(1)
                        .build()
                )
                .duration(Duration.ofMinutes(5))
                .build();

        // Put non-write regions first so the preferred (first) region is read-only when possible.
        List<String> preferredRegionList = new ArrayList<>();
        for (String region : this.readRegionMap.keySet()) {
            if (this.writeRegionMap.containsKey(region)) {
                preferredRegionList.add(region);
            } else {
                preferredRegionList.add(0, region);
            }
        }

        // BUGFIX: was declared uninitialized and never closed; initialize to null so it can
        // be safely closed in the finally block below.
        CosmosAsyncClient clientWithPreferredRegions = null;
        try {
            clientWithPreferredRegions = new CosmosClientBuilder()
                .key(TestConfigurations.MASTER_KEY)
                .endpoint(TestConfigurations.HOST)
                .preferredRegions(preferredRegionList)
                .consistencyLevel(ConsistencyLevel.EVENTUAL)
                .buildAsyncClient();

            CosmosAsyncContainer container = clientWithPreferredRegions
                .getDatabase(this.cosmosAsyncContainer.getDatabase().getId())
                .getContainer(this.cosmosAsyncContainer.getId());

            CosmosFaultInjectionHelper.configureFaultInjectionRules(container, Arrays.asList(writeRegionServerGoneErrorRule)).block();
            assertThat(writeRegionServerGoneErrorRule.getRegionEndpoints().size()).isEqualTo(2);

            CosmosDiagnostics cosmosDiagnostics = this.performDocumentOperation(container, operationType, createdItem);
            if (operationType.isWriteOperation()) {
                assertThat(writeRegionServerGoneErrorRule.getHitCount()).isEqualTo(1);
                this.validateFaultInjectionRuleApplied(
                    cosmosDiagnostics,
                    operationType,
                    HttpConstants.StatusCodes.GONE,
                    HttpConstants.SubStatusCodes.UNKNOWN,
                    writeRegionServerGoneRuleId,
                    true);
            } else {
                this.validateNoFaultInjectionApplied(cosmosDiagnostics, operationType);
            }

            writeRegionServerGoneErrorRule.disable();

            CosmosFaultInjectionHelper.configureFaultInjectionRules(container, Arrays.asList(primaryReplicaServerGoneErrorRule)).block();
            // NOTE(review): assertThat(boolean) without .isTrue() asserts nothing; expected
            // condition needs confirmation before enabling.
            assertThat(
                primaryReplicaServerGoneErrorRule.getRegionEndpoints().size() == this.writeRegionMap.size()
                    && primaryReplicaServerGoneErrorRule.getRegionEndpoints().containsAll(this.writeRegionMap.keySet()));
            assertThat(primaryReplicaServerGoneErrorRule.getAddresses().size() == this.writeRegionMap.size());
        } finally {
            writeRegionServerGoneErrorRule.disable();
            primaryReplicaServerGoneErrorRule.disable();
            // BUGFIX: close the client created above (previously leaked).
            safeClose(clientWithPreferredRegions);
        }
    }

    /**
     * A rule scoped to the local (first preferred) region fires there and only there;
     * a rule scoped to a remote region never fires for local reads.
     */
    @Test(groups = {"multi-region"}, timeOut = TIMEOUT)
    public void faultInjectionServerErrorRuleTests_Region() throws JsonProcessingException {
        List<String> preferredLocations = this.readRegionMap.keySet().stream().collect(Collectors.toList());
        CosmosAsyncClient clientWithPreferredRegion = null;

        String localRegionRuleId = "ServerErrorRule-LocalRegion-" + UUID.randomUUID();
        FaultInjectionRule serverErrorRuleLocalRegion =
            new FaultInjectionRuleBuilder(localRegionRuleId)
                .condition(
                    new FaultInjectionConditionBuilder()
                        .region(preferredLocations.get(0))
                        .build()
                )
                .result(
                    FaultInjectionResultBuilders
                        .getResultBuilder(FaultInjectionServerErrorType.GONE)
                        .times(1)
                        .build()
                )
                .duration(Duration.ofMinutes(5))
                .build();

        String remoteRegionRuleId = "ServerErrorRule-RemoteRegion-" + UUID.randomUUID();
        FaultInjectionRule serverErrorRuleRemoteRegion =
            new FaultInjectionRuleBuilder(remoteRegionRuleId)
                .condition(
                    new FaultInjectionConditionBuilder()
                        .region(preferredLocations.get(1))
                        .build()
                )
                .result(
                    FaultInjectionResultBuilders
                        .getResultBuilder(FaultInjectionServerErrorType.GONE)
                        .times(1)
                        .build()
                )
                .duration(Duration.ofMinutes(5))
                .build();

        try {
            clientWithPreferredRegion = new CosmosClientBuilder()
                .endpoint(TestConfigurations.HOST)
                .key(TestConfigurations.MASTER_KEY)
                .contentResponseOnWriteEnabled(true)
                .consistencyLevel(BridgeInternal.getContextClient(this.client).getConsistencyLevel())
                .preferredRegions(preferredLocations)
                .directMode()
                .buildAsyncClient();

            CosmosAsyncContainer container = clientWithPreferredRegion
                .getDatabase(this.cosmosAsyncContainer.getDatabase().getId())
                .getContainer(this.cosmosAsyncContainer.getId());

            TestItem createdItem = TestItem.createNewItem();
            container.createItem(createdItem).block();

            CosmosFaultInjectionHelper.configureFaultInjectionRules(
                    container,
                    Arrays.asList(serverErrorRuleLocalRegion, serverErrorRuleRemoteRegion))
                .block();

            // NOTE(review): assertThat(boolean) without .isTrue() asserts nothing; confirm
            // expectations before enabling.
            assertThat(
                serverErrorRuleLocalRegion.getRegionEndpoints().size() == 1
                    && serverErrorRuleLocalRegion.getRegionEndpoints().get(0).equals(this.readRegionMap.get(preferredLocations.get(0))));
            assertThat(
                serverErrorRuleRemoteRegion.getRegionEndpoints().size() == 1
                    && serverErrorRuleRemoteRegion.getRegionEndpoints().get(0).equals(this.readRegionMap.get(preferredLocations.get(1))));

            CosmosDiagnostics cosmosDiagnostics = this.performDocumentOperation(container, OperationType.Read, createdItem);
            assertThat(serverErrorRuleLocalRegion.getHitCount()).isEqualTo(1);
            assertThat(serverErrorRuleRemoteRegion.getHitCount()).isEqualTo(0);
            this.validateFaultInjectionRuleApplied(
                cosmosDiagnostics,
                OperationType.Read,
                HttpConstants.StatusCodes.GONE,
                HttpConstants.SubStatusCodes.UNKNOWN,
                localRegionRuleId,
                true
            );

            serverErrorRuleLocalRegion.disable();
            cosmosDiagnostics = this.performDocumentOperation(container, OperationType.Read, createdItem);
            this.validateNoFaultInjectionApplied(cosmosDiagnostics, OperationType.Read);
        } finally {
            serverErrorRuleLocalRegion.disable();
            serverErrorRuleRemoteRegion.disable();
            safeClose(clientWithPreferredRegion);
        }
    }

    /**
     * A rule scoped to one feed range fires for queries against that feed range and
     * not for queries against a different feed range.
     */
    @Test(groups = {"multi-region", "simple"}, timeOut = TIMEOUT)
    public void faultInjectionServerErrorRuleTests_Partition() throws JsonProcessingException {
        TestItem createdItem = TestItem.createNewItem();
        cosmosAsyncContainer.createItem(createdItem).block();

        List<FeedRange> feedRanges = cosmosAsyncContainer.getFeedRanges().block();
        String feedRangeRuleId = "ServerErrorRule-FeedRange-" + UUID.randomUUID();
        FaultInjectionRule serverErrorRuleByFeedRange =
            new FaultInjectionRuleBuilder(feedRangeRuleId)
                .condition(
                    new FaultInjectionConditionBuilder()
                        .endpoints(new FaultInjectionEndpointBuilder(feedRanges.get(0)).build())
                        .build()
                )
                .result(
                    FaultInjectionResultBuilders
                        .getResultBuilder(FaultInjectionServerErrorType.GONE)
                        .times(1)
                        .build()
                )
                .duration(Duration.ofMinutes(5))
                .build();

        CosmosFaultInjectionHelper.configureFaultInjectionRules(cosmosAsyncContainer, Arrays.asList(serverErrorRuleByFeedRange)).block();
        // NOTE(review): assertThat(boolean) without .isTrue() asserts nothing; also note this
        // compares endpoints against keySet() while other tests use values() — confirm.
        assertThat(
            serverErrorRuleByFeedRange.getRegionEndpoints().size() == this.readRegionMap.size()
                && serverErrorRuleByFeedRange.getRegionEndpoints().containsAll(this.readRegionMap.keySet()));
        // 3-5 replicas per region for the targeted feed range.
        assertThat(serverErrorRuleByFeedRange.getAddresses().size()).isBetween(
            this.readRegionMap.size() * 3, this.readRegionMap.size() * 5);

        String query = "select * from c";
        CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions();
        queryRequestOptions.setFeedRange(feedRanges.get(0));

        CosmosDiagnostics cosmosDiagnostics =
            cosmosAsyncContainer.queryItems(query, queryRequestOptions, TestItem.class).byPage().blockFirst().getCosmosDiagnostics();
        assertThat(serverErrorRuleByFeedRange.getHitCount()).isEqualTo(1);
        this.validateFaultInjectionRuleApplied(
            cosmosDiagnostics,
            OperationType.Query,
            HttpConstants.StatusCodes.GONE,
            HttpConstants.SubStatusCodes.UNKNOWN,
            feedRangeRuleId,
            true
        );

        // A query against a different feed range must not be affected.
        queryRequestOptions.setFeedRange(feedRanges.get(1));
        try {
            cosmosDiagnostics =
                cosmosAsyncContainer.queryItems(query, queryRequestOptions, TestItem.class).byPage().blockFirst().getCosmosDiagnostics();
            this.validateNoFaultInjectionApplied(cosmosDiagnostics, OperationType.Query);
        } finally {
            serverErrorRuleByFeedRange.disable();
        }
    }

    /**
     * An injected CONNECTION_DELAY longer than the connect timeout surfaces as a
     * transport-generated 410 on create, then the retry succeeds.
     * (BUGFIX: a duplicated {@code @Test} annotation was removed here.)
     */
    @Test(groups = {"multi-region", "simple"}, timeOut = TIMEOUT)
    public void faultInjectionServerErrorRuleTests_ServerConnectionDelay() throws JsonProcessingException {
        CosmosAsyncClient newClient = null;
        String ruleId = "serverErrorRule-serverConnectionDelay-" + UUID.randomUUID();
        FaultInjectionRule serverConnectionDelayRule =
            new FaultInjectionRuleBuilder(ruleId)
                .condition(
                    new FaultInjectionConditionBuilder()
                        .operationType(FaultInjectionOperationType.CREATE_ITEM)
                        .build()
                )
                .result(
                    FaultInjectionResultBuilders
                        .getResultBuilder(FaultInjectionServerErrorType.CONNECTION_DELAY)
                        .delay(Duration.ofSeconds(2))
                        .times(1)
                        .build()
                )
                .duration(Duration.ofMinutes(5))
                .build();

        try {
            DirectConnectionConfig directConnectionConfig = DirectConnectionConfig.getDefaultConfig();
            directConnectionConfig.setConnectTimeout(Duration.ofSeconds(1));

            newClient = new CosmosClientBuilder()
                .endpoint(TestConfigurations.HOST)
                .key(TestConfigurations.MASTER_KEY)
                .contentResponseOnWriteEnabled(true)
                .consistencyLevel(BridgeInternal.getContextClient(this.client).getConsistencyLevel())
                .directMode(directConnectionConfig)
                .buildAsyncClient();

            CosmosAsyncContainer container = newClient
                .getDatabase(cosmosAsyncContainer.getDatabase().getId())
                .getContainer(cosmosAsyncContainer.getId());

            CosmosFaultInjectionHelper.configureFaultInjectionRules(container, Arrays.asList(serverConnectionDelayRule)).block();
            CosmosItemResponse<TestItem> itemResponse = container.createItem(TestItem.createNewItem()).block();

            // BUGFIX: literal suffix 'l' -> 'L'.
            assertThat(serverConnectionDelayRule.getHitCount()).isBetween(1L, 2L);
            this.validateFaultInjectionRuleApplied(
                itemResponse.getDiagnostics(),
                OperationType.Create,
                HttpConstants.StatusCodes.GONE,
                HttpConstants.SubStatusCodes.TRANSPORT_GENERATED_410,
                ruleId,
                true
            );
        } finally {
            serverConnectionDelayRule.disable();
            safeClose(newClient);
        }
    }

    /**
     * CONNECTION_DELAY during connection warm-up: the rule's hit count stays within the
     * bounds implied by the partition/replica count (primary-only vs all replicas).
     */
    @Test(groups = {"multi-region", "simple"}, dataProvider = "faultInjectionOperationTypeProvider", timeOut = TIMEOUT)
    public void faultInjectionServerErrorRuleTests_ServerConnectionDelay_warmup(
        FaultInjectionOperationType operationType,
        boolean primaryAddressesOnly) {
        CosmosAsyncClient newClient = null;
        String ruleId = "serverErrorRule-serverConnectionDelay-warmup" + UUID.randomUUID();
        FaultInjectionRule serverConnectionDelayWarmupRule =
            new FaultInjectionRuleBuilder(ruleId)
                .condition(
                    new FaultInjectionConditionBuilder()
                        .operationType(operationType)
                        .build()
                )
                .result(
                    FaultInjectionResultBuilders
                        .getResultBuilder(FaultInjectionServerErrorType.CONNECTION_DELAY)
                        .delay(Duration.ofSeconds(2))
                        .times(1)
                        .build()
                )
                .duration(Duration.ofMinutes(5))
                .build();

        try {
            DirectConnectionConfig directConnectionConfig = DirectConnectionConfig.getDefaultConfig();
            directConnectionConfig.setConnectTimeout(Duration.ofSeconds(1));

            newClient = new CosmosClientBuilder()
                .endpoint(TestConfigurations.HOST)
                .key(TestConfigurations.MASTER_KEY)
                .contentResponseOnWriteEnabled(true)
                .consistencyLevel(BridgeInternal.getContextClient(this.client).getConsistencyLevel())
                .directMode(directConnectionConfig)
                .buildAsyncClient();

            CosmosAsyncContainer container = newClient
                .getDatabase(cosmosAsyncContainer.getDatabase().getId())
                .getContainer(cosmosAsyncContainer.getId());

            CosmosFaultInjectionHelper.configureFaultInjectionRules(container, Arrays.asList(serverConnectionDelayWarmupRule)).block();

            int partitionSize = container.getFeedRanges().block().size();
            container.openConnectionsAndInitCaches().block();

            if (primaryAddressesOnly) {
                // One primary per partition, plus up to two connection retries each.
                int primaryAddressCount = partitionSize;
                int maxConnectionRetriesPerPrimary = primaryAddressCount * 2;
                assertThat(serverConnectionDelayWarmupRule.getHitCount()).isLessThanOrEqualTo(primaryAddressCount + maxConnectionRetriesPerPrimary);
            } else {
                // 3-5 replicas per partition, each attempt may be retried up to twice.
                long minSecondaryAddressesCount = 3L * partitionSize;
                long maxAddressesCount = 5L * partitionSize;
                long minTotalConnectionEstablishmentAttempts = minSecondaryAddressesCount + 2 * minSecondaryAddressesCount;
                long maxTotalConnectionEstablishmentAttempts = maxAddressesCount + 2 * maxAddressesCount;
                assertThat(serverConnectionDelayWarmupRule.getHitCount()).isBetween(minTotalConnectionEstablishmentAttempts, maxTotalConnectionEstablishmentAttempts);
            }
        } finally {
            serverConnectionDelayWarmupRule.disable();
            safeClose(newClient);
        }
    }

    /**
     * Each injected server error surfaces with the expected status/sub-status code;
     * retriable errors still let the read succeed, non-retriable errors fail it.
     */
    @Test(groups = {"multi-region", "simple"}, dataProvider = "faultInjectionServerErrorResponseProvider", timeOut = TIMEOUT)
    public void faultInjectionServerErrorRuleTests_ServerErrorResponse(
        FaultInjectionServerErrorType serverErrorType,
        boolean canRetry,
        int errorStatusCode,
        int errorSubStatusCode) throws JsonProcessingException {
        String ruleId = "serverErrorRule-" + serverErrorType + "-" + UUID.randomUUID();
        FaultInjectionRule serverErrorRule =
            new FaultInjectionRuleBuilder(ruleId)
                .condition(
                    new FaultInjectionConditionBuilder()
                        .operationType(FaultInjectionOperationType.READ_ITEM)
                        .build()
                )
                .result(
                    FaultInjectionResultBuilders
                        .getResultBuilder(serverErrorType)
                        .times(1)
                        .build()
                )
                .duration(Duration.ofMinutes(5))
                .build();

        try {
            TestItem createdItem = TestItem.createNewItem();
            cosmosAsyncContainer.createItem(createdItem).block();

            CosmosFaultInjectionHelper.configureFaultInjectionRules(cosmosAsyncContainer, Arrays.asList(serverErrorRule)).block();

            CosmosDiagnostics cosmosDiagnostics = null;
            if (canRetry) {
                try {
                    cosmosDiagnostics = cosmosAsyncContainer
                        .readItem(createdItem.getId(), new PartitionKey(createdItem.getId()), TestItem.class)
                        .block()
                        .getDiagnostics();
                } catch (Exception exception) {
                    // BUGFIX: message typo "should succeeded" -> "should succeed".
                    fail("Request should succeed, but failed with " + exception);
                }
            } else {
                try {
                    cosmosDiagnostics = cosmosAsyncContainer
                        .readItem(createdItem.getId(), new PartitionKey(createdItem.getId()), TestItem.class)
                        .block()
                        .getDiagnostics();
                    fail("Request should fail, but succeeded");
                } catch (Exception e) {
                    cosmosDiagnostics = ((CosmosException) e).getDiagnostics();
                }
            }

            assertThat(serverErrorRule.getHitCount()).isEqualTo(1);
            this.validateFaultInjectionRuleApplied(
                cosmosDiagnostics,
                OperationType.Read,
                errorStatusCode,
                errorSubStatusCode,
                ruleId,
                canRetry
            );
        } finally {
            serverErrorRule.disable();
        }
    }

    /** A rule with hitLimit(2) fires exactly twice and then stops applying. */
    @Test(groups = {"multi-region", "simple"}, timeOut = TIMEOUT)
    public void faultInjectionServerErrorRuleTests_HitLimit() throws JsonProcessingException {
        TestItem createdItem = TestItem.createNewItem();
        cosmosAsyncContainer.createItem(createdItem).block();

        String hitLimitRuleId = "ServerErrorRule-hitLimit-" + UUID.randomUUID();
        FaultInjectionRule hitLimitServerErrorRule =
            new FaultInjectionRuleBuilder(hitLimitRuleId)
                .condition(
                    new FaultInjectionConditionBuilder()
                        .build()
                )
                .result(
                    FaultInjectionResultBuilders
                        .getResultBuilder(FaultInjectionServerErrorType.GONE)
                        .times(1)
                        .build()
                )
                .hitLimit(2)
                .build();

        try {
            CosmosFaultInjectionHelper.configureFaultInjectionRules(cosmosAsyncContainer, Arrays.asList(hitLimitServerErrorRule)).block();
            // NOTE(review): assertThat(boolean) without .isTrue() asserts nothing; confirm
            // expectations (keySet vs values) before enabling.
            assertThat(
                hitLimitServerErrorRule.getRegionEndpoints().size() == this.readRegionMap.size()
                    && hitLimitServerErrorRule.getRegionEndpoints().containsAll(this.readRegionMap.keySet()));
            assertThat(hitLimitServerErrorRule.getAddresses().size() == 0);

            for (int i = 1; i <= 3; i++) {
                CosmosDiagnostics cosmosDiagnostics = this.performDocumentOperation(cosmosAsyncContainer, OperationType.Read, createdItem);
                if (i <= 2) {
                    this.validateFaultInjectionRuleApplied(
                        cosmosDiagnostics,
                        OperationType.Read,
                        HttpConstants.StatusCodes.GONE,
                        HttpConstants.SubStatusCodes.UNKNOWN,
                        hitLimitRuleId,
                        true
                    );
                } else {
                    cosmosDiagnostics = this.performDocumentOperation(cosmosAsyncContainer, OperationType.Read, createdItem);
                    this.validateNoFaultInjectionApplied(cosmosDiagnostics, OperationType.Read);
                }
            }

            assertThat(hitLimitServerErrorRule.getHitCount()).isEqualTo(2);
        } finally {
            hitLimitServerErrorRule.disable();
        }
    }

    @AfterClass(groups = {"multi-region", "simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
    public void afterClass() {
        safeClose(client);
    }

    /**
     * Executes the given document operation against the container and returns its
     * diagnostics; a CosmosException's diagnostics are returned instead of rethrowing.
     */
    private CosmosDiagnostics performDocumentOperation(
        CosmosAsyncContainer cosmosAsyncContainer,
        OperationType operationType,
        TestItem createdItem) {
        try {
            if (operationType == OperationType.Query) {
                CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions();
                String query = String.format("SELECT * from c where c.id = '%s'", createdItem.getId());
                FeedResponse<TestItem> itemFeedResponse =
                    cosmosAsyncContainer.queryItems(query, queryRequestOptions, TestItem.class).byPage().blockFirst();
                return itemFeedResponse.getCosmosDiagnostics();
            }

            if (operationType == OperationType.Read
                || operationType == OperationType.Delete
                || operationType == OperationType.Replace
                || operationType == OperationType.Create
                || operationType == OperationType.Patch
                || operationType == OperationType.Upsert) {

                if (operationType == OperationType.Read) {
                    return cosmosAsyncContainer.readItem(
                        createdItem.getId(),
                        new PartitionKey(createdItem.getId()),
                        TestItem.class).block().getDiagnostics();
                }

                if (operationType == OperationType.Replace) {
                    return cosmosAsyncContainer.replaceItem(
                        createdItem,
                        createdItem.getId(),
                        new PartitionKey(createdItem.getId())).block().getDiagnostics();
                }

                if (operationType == OperationType.Delete) {
                    return cosmosAsyncContainer.deleteItem(createdItem, null).block().getDiagnostics();
                }

                if (operationType == OperationType.Create) {
                    return cosmosAsyncContainer.createItem(TestItem.createNewItem()).block().getDiagnostics();
                }

                if (operationType == OperationType.Upsert) {
                    return cosmosAsyncContainer.upsertItem(TestItem.createNewItem()).block().getDiagnostics();
                }

                if (operationType == OperationType.Patch) {
                    CosmosPatchOperations patchOperations =
                        CosmosPatchOperations
                            .create()
                            .add("newPath", "newPath");
                    return cosmosAsyncContainer
                        .patchItem(createdItem.getId(), new PartitionKey(createdItem.getId()), patchOperations, TestItem.class)
                        .block().getDiagnostics();
                }
            }

            throw new IllegalArgumentException("The operation type is not supported");
        } catch (CosmosException cosmosException) {
            return cosmosException.getDiagnostics();
        }
    }

    /**
     * With includePrimary(true) and replicaCount(1) the rule also applies to
     * primary-routed operations (create/upsert) on the targeted feed range.
     */
    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void faultInjectionServerErrorRuleTests_includePrimary() throws JsonProcessingException {
        TestItem createdItem = TestItem.createNewItem();
        CosmosAsyncContainer singlePartitionContainer = getSharedSinglePartitionCosmosContainer(client);
        List<FeedRange> feedRanges = singlePartitionContainer.getFeedRanges().block();

        String serverGoneIncludePrimaryRuleId = "serverErrorRule-includePrimary-" + UUID.randomUUID();
        FaultInjectionRule serverGoneIncludePrimaryErrorRule =
            new FaultInjectionRuleBuilder(serverGoneIncludePrimaryRuleId)
                .condition(
                    new FaultInjectionConditionBuilder()
                        .endpoints(
                            new FaultInjectionEndpointBuilder(feedRanges.get(0))
                                .replicaCount(1)
                                .includePrimary(true)
                                .build()
                        )
                        .build()
                )
                .result(
                    FaultInjectionResultBuilders
                        .getResultBuilder(FaultInjectionServerErrorType.GONE)
                        .times(1)
                        .build()
                )
                .duration(Duration.ofMinutes(5))
                .build();

        try {
            CosmosFaultInjectionHelper.configureFaultInjectionRules(singlePartitionContainer, Arrays.asList(serverGoneIncludePrimaryErrorRule)).block();

            CosmosDiagnostics cosmosDiagnostics = this.performDocumentOperation(singlePartitionContainer, OperationType.Create, createdItem);
            this.validateFaultInjectionRuleApplied(
                cosmosDiagnostics,
                OperationType.Create,
                HttpConstants.StatusCodes.GONE,
                HttpConstants.SubStatusCodes.UNKNOWN,
                serverGoneIncludePrimaryRuleId,
                true);

            cosmosDiagnostics = this.performDocumentOperation(singlePartitionContainer, OperationType.Upsert, createdItem);
            this.validateFaultInjectionRuleApplied(
                cosmosDiagnostics,
                OperationType.Upsert,
                HttpConstants.StatusCodes.GONE,
                HttpConstants.SubStatusCodes.UNKNOWN,
                serverGoneIncludePrimaryRuleId,
                true);
        } finally {
            serverGoneIncludePrimaryErrorRule.disable();
        }
    }

    /**
     * Asserts from the diagnostics JSON that the fault-injection rule was applied: the first
     * store result carries the expected status/sub-status code and rule id, and a retriable
     * injected error produced exactly two store results (failure + successful retry).
     */
    private void validateFaultInjectionRuleApplied(
        CosmosDiagnostics cosmosDiagnostics,
        OperationType operationType,
        int statusCode,
        int subStatusCode,
        String ruleId,
        boolean canRetryOnFaultInjectedError) throws JsonProcessingException {
        List<ObjectNode> diagnosticsNode = new ArrayList<>();
        if (operationType == OperationType.Query) {
            // Query diagnostics prepend gateway/query-plan info; parse only the
            // client-side request statistics array.
            int clientSideDiagnosticsIndex = cosmosDiagnostics.toString().indexOf("[{\"userAgent\"");
            ArrayNode arrayNode =
                (ArrayNode) Utils.getSimpleObjectMapper().readTree(cosmosDiagnostics.toString().substring(clientSideDiagnosticsIndex));
            for (JsonNode node : arrayNode) {
                diagnosticsNode.add((ObjectNode) node);
            }
        } else {
            diagnosticsNode.add((ObjectNode) Utils.getSimpleObjectMapper().readTree(cosmosDiagnostics.toString()));
        }

        for (ObjectNode diagnosticNode : diagnosticsNode) {
            JsonNode responseStatisticsList = diagnosticNode.get("responseStatisticsList");
            assertThat(responseStatisticsList.isArray()).isTrue();

            if (canRetryOnFaultInjectedError) {
                assertThat(responseStatisticsList.size()).isEqualTo(2);
            } else {
                assertThat(responseStatisticsList.size()).isOne();
            }
            JsonNode storeResult = responseStatisticsList.get(0).get("storeResult");

            assertThat(storeResult).isNotNull();
            assertThat(storeResult.get("statusCode").asInt()).isEqualTo(statusCode);
            assertThat(storeResult.get("subStatusCode").asInt()).isEqualTo(subStatusCode);
            assertThat(storeResult.get("faultInjectionRuleId").asText()).isEqualTo(ruleId);
        }
    }

    /**
     * Asserts from the diagnostics JSON that no fault-injection rule was applied:
     * a single store result with no faultInjectionRuleId.
     */
    private void validateNoFaultInjectionApplied(
        CosmosDiagnostics cosmosDiagnostics,
        OperationType operationType) throws JsonProcessingException {
        List<ObjectNode> diagnosticsNode = new ArrayList<>();
        if (operationType == OperationType.Query) {
            int clientSideDiagnosticsIndex = cosmosDiagnostics.toString().indexOf("[{\"userAgent\"");
            ArrayNode arrayNode =
                (ArrayNode) Utils.getSimpleObjectMapper().readTree(cosmosDiagnostics.toString().substring(clientSideDiagnosticsIndex));
            for (JsonNode node : arrayNode) {
                diagnosticsNode.add((ObjectNode) node);
            }
        } else {
            diagnosticsNode.add((ObjectNode) Utils.getSimpleObjectMapper().readTree(cosmosDiagnostics.toString()));
        }

        for (ObjectNode diagnosticNode : diagnosticsNode) {
            JsonNode responseStatisticsList = diagnosticNode.get("responseStatisticsList");
            assertThat(responseStatisticsList.isArray()).isTrue();

            for (int i = 0; i < responseStatisticsList.size(); i++) {
                JsonNode storeResult = responseStatisticsList.get(i).get("storeResult");
                assertThat(storeResult.get("faultInjectionRuleId")).isNull();
            }
            assertThat(responseStatisticsList.size()).isOne();
        }
    }

    /** Builds a region-name -> endpoint map for the account's readable or writable locations. */
    private Map<String, String> getRegionMap(DatabaseAccount databaseAccount, boolean writeOnly) {
        Iterator<DatabaseAccountLocation> locationIterator =
            writeOnly ? databaseAccount.getWritableLocations().iterator() : databaseAccount.getReadableLocations().iterator();
        Map<String, String> regionMap = new ConcurrentHashMap<>();

        while (locationIterator.hasNext()) {
            DatabaseAccountLocation accountLocation = locationIterator.next();
            regionMap.put(accountLocation.getName(), accountLocation.getEndpoint());
        }

        return regionMap;
    }
}
class FaultInjectionServerErrorRuleTests extends TestSuiteBase { private static final int TIMEOUT = 60000; private static final String FAULT_INJECTION_RULE_NON_APPLICABLE_ADDRESS = "Addresses mismatch"; private static final String FAULT_INJECTION_RULE_NON_APPLICABLE_OPERATION_TYPE = "OperationType mismatch"; private static final String FAULT_INJECTION_RULE_NON_APPLICABLE_REGION_ENDPOINT = "RegionEndpoint mismatch"; private static final String FAULT_INJECTION_RULE_NON_APPLICABLE_HIT_LIMIT = "Hit Limit reached"; private CosmosAsyncClient client; private CosmosAsyncContainer cosmosAsyncContainer; private DatabaseAccount databaseAccount; private Map<String, String> readRegionMap; private Map<String, String> writeRegionMap; @Factory(dataProvider = "simpleClientBuildersWithJustDirectTcp") public FaultInjectionServerErrorRuleTests(CosmosClientBuilder clientBuilder) { super(clientBuilder); this.subscriberValidationTimeout = TIMEOUT; } @BeforeClass(groups = {"multi-region", "simple"}, timeOut = TIMEOUT) public void beforeClass() { client = getClientBuilder().buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = BridgeInternal.getContextClient(client); GlobalEndpointManager globalEndpointManager = asyncDocumentClient.getGlobalEndpointManager(); DatabaseAccount databaseAccount = globalEndpointManager.getLatestDatabaseAccount(); this.databaseAccount = databaseAccount; this.cosmosAsyncContainer = getSharedMultiPartitionCosmosContainerWithIdAsPartitionKey(client); this.readRegionMap = this.getRegionMap(databaseAccount, false); this.writeRegionMap = this.getRegionMap(databaseAccount, true); } @DataProvider(name = "operationTypeProvider") public static Object[][] operationTypeProvider() { return new Object[][]{ { OperationType.Read }, { OperationType.Replace }, { OperationType.Create }, { OperationType.Delete }, { OperationType.Query }, { OperationType.Patch } }; } @DataProvider(name = "faultInjectionOperationTypeProvider") public static Object[][] 
faultInjectionOperationTypeProvider() { return new Object[][]{ { FaultInjectionOperationType.READ_ITEM, false }, { FaultInjectionOperationType.REPLACE_ITEM, true }, { FaultInjectionOperationType.CREATE_ITEM, true }, { FaultInjectionOperationType.DELETE_ITEM, true}, { FaultInjectionOperationType.QUERY_ITEM, false }, { FaultInjectionOperationType.PATCH_ITEM, true } }; } @DataProvider(name = "faultInjectionServerErrorResponseProvider") public static Object[][] faultInjectionServerErrorResponseProvider() { return new Object[][]{ { FaultInjectionServerErrorType.GONE, true, 410, HttpConstants.SubStatusCodes.SERVER_GENERATED_410 }, { FaultInjectionServerErrorType.INTERNAL_SERVER_ERROR, false, 500, 0 }, { FaultInjectionServerErrorType.RETRY_WITH, true, 449, 0 }, { FaultInjectionServerErrorType.TOO_MANY_REQUEST, true, 429, 0 }, { FaultInjectionServerErrorType.READ_SESSION_NOT_AVAILABLE, true, 404, 1002 }, { FaultInjectionServerErrorType.TIMEOUT, true, 410, HttpConstants.SubStatusCodes.SERVER_GENERATED_408 }, { FaultInjectionServerErrorType.PARTITION_IS_MIGRATING, true, 410, 1008 }, { FaultInjectionServerErrorType.PARTITION_IS_SPLITTING, true, 410, 1007 } }; } @Test(groups = {"multi-region", "simple"}, dataProvider = "operationTypeProvider", timeOut = TIMEOUT) public void faultInjectionServerErrorRuleTests_OperationType(OperationType operationType) throws JsonProcessingException { String serverGoneRuleId = "serverErrorRule-serverGone-" + UUID.randomUUID(); FaultInjectionRule serverGoneErrorRule = new FaultInjectionRuleBuilder(serverGoneRuleId) .condition( new FaultInjectionConditionBuilder() .operationType(FaultInjectionOperationType.READ_ITEM) .build() ) .result( FaultInjectionResultBuilders .getResultBuilder(FaultInjectionServerErrorType.GONE) .times(1) .build() ) .duration(Duration.ofMinutes(5)) .build(); String tooManyRequestsRuleId = "serverErrorRule-tooManyRequests-" + UUID.randomUUID(); FaultInjectionRule serverTooManyRequestsErrorRule = new 
FaultInjectionRuleBuilder(tooManyRequestsRuleId) .condition( new FaultInjectionConditionBuilder() .operationType(FaultInjectionOperationType.READ_ITEM) .build() ) .result( FaultInjectionResultBuilders .getResultBuilder(FaultInjectionServerErrorType.TOO_MANY_REQUEST) .times(1) .build() ) .duration(Duration.ofMinutes(5)) .build(); try { TestItem createdItem = TestItem.createNewItem(); cosmosAsyncContainer.createItem(createdItem).block(); CosmosFaultInjectionHelper.configureFaultInjectionRules(cosmosAsyncContainer, Arrays.asList(serverGoneErrorRule)).block(); assertThat(serverGoneErrorRule.getAddresses().size()).isZero(); assertThat(serverGoneErrorRule.getRegionEndpoints().size() == this.readRegionMap.size() + 1 && serverGoneErrorRule.getRegionEndpoints().containsAll(this.readRegionMap.values())); CosmosDiagnostics cosmosDiagnostics = this.performDocumentOperation(cosmosAsyncContainer, operationType, createdItem); this.validateFaultInjectionRuleApplied( cosmosDiagnostics, operationType, HttpConstants.StatusCodes.GONE, HttpConstants.SubStatusCodes.SERVER_GENERATED_410, serverGoneRuleId, true); serverGoneErrorRule.disable(); CosmosFaultInjectionHelper.configureFaultInjectionRules(cosmosAsyncContainer, Arrays.asList(serverTooManyRequestsErrorRule)).block(); assertThat(serverGoneErrorRule.getAddresses().size()).isZero(); assertThat(serverGoneErrorRule.getRegionEndpoints().size() == this.readRegionMap.size() + 1 && serverGoneErrorRule.getRegionEndpoints().containsAll(this.readRegionMap.values())); cosmosDiagnostics = this.performDocumentOperation(cosmosAsyncContainer, operationType, createdItem); if (operationType == OperationType.Read) { this.validateHitCount(serverTooManyRequestsErrorRule, 1, OperationType.Read, ResourceType.Document); this.validateFaultInjectionRuleApplied( cosmosDiagnostics, operationType, HttpConstants.StatusCodes.TOO_MANY_REQUESTS, HttpConstants.SubStatusCodes.UNKNOWN, tooManyRequestsRuleId, true); } else { 
this.validateNoFaultInjectionApplied(cosmosDiagnostics, operationType, FAULT_INJECTION_RULE_NON_APPLICABLE_OPERATION_TYPE); } } finally { serverGoneErrorRule.disable(); serverTooManyRequestsErrorRule.disable(); } } @Test(groups = {"multi-region"}, dataProvider = "operationTypeProvider", timeOut = TIMEOUT) public void faultInjectionServerErrorRuleTests_OperationTypeImpactAddresses(OperationType operationType) throws JsonProcessingException { TestItem createdItem = TestItem.createNewItem(); this.cosmosAsyncContainer.createItem(createdItem).block(); String writeRegionServerGoneRuleId = "serverErrorRule-writeRegionOnly-" + UUID.randomUUID(); FaultInjectionRule writeRegionServerGoneErrorRule = new FaultInjectionRuleBuilder(writeRegionServerGoneRuleId) .condition( new FaultInjectionConditionBuilder() .operationType(FaultInjectionOperationType.CREATE_ITEM) .build() ) .result( FaultInjectionResultBuilders .getResultBuilder(FaultInjectionServerErrorType.GONE) .times(1) .build() ) .duration(Duration.ofMinutes(5)) .build(); String primaryReplicaServerGoneRuleId = "serverErrorRule-primaryReplicaOnly-" + UUID.randomUUID(); FaultInjectionRule primaryReplicaServerGoneErrorRule = new FaultInjectionRuleBuilder(primaryReplicaServerGoneRuleId) .condition( new FaultInjectionConditionBuilder() .operationType(FaultInjectionOperationType.CREATE_ITEM) .endpoints( new FaultInjectionEndpointBuilder(FeedRange.forLogicalPartition(new PartitionKey(createdItem.getId()))) .replicaCount(3) .build()) .build() ) .result( FaultInjectionResultBuilders .getResultBuilder(FaultInjectionServerErrorType.GONE) .times(1) .build() ) .duration(Duration.ofMinutes(5)) .build(); List<String> preferredRegionList = new ArrayList<>(); for (String region : this.readRegionMap.keySet()) { if (this.writeRegionMap.containsKey(region)) { preferredRegionList.add(region); } else { preferredRegionList.add(0, region); } } logger.info( "Inside fault injection test, OperationType {}, write region {}, read region", 
operationType, this.writeRegionMap.values(), this.readRegionMap.values()); CosmosAsyncClient clientWithPreferredRegions; try { clientWithPreferredRegions = new CosmosClientBuilder() .key(TestConfigurations.MASTER_KEY) .endpoint(TestConfigurations.HOST) .preferredRegions(preferredRegionList) .consistencyLevel(ConsistencyLevel.EVENTUAL) .buildAsyncClient(); CosmosAsyncContainer container = clientWithPreferredRegions .getDatabase(this.cosmosAsyncContainer.getDatabase().getId()) .getContainer(this.cosmosAsyncContainer.getId()); CosmosFaultInjectionHelper.configureFaultInjectionRules(container, Arrays.asList(writeRegionServerGoneErrorRule)).block(); assertThat(writeRegionServerGoneErrorRule.getRegionEndpoints().size()).isEqualTo(this.writeRegionMap.size() + 1); CosmosDiagnostics cosmosDiagnostics = this.performDocumentOperation(container, operationType, createdItem); if (operationType.isWriteOperation()) { this.validateHitCount(writeRegionServerGoneErrorRule, 1, operationType, ResourceType.Document); this.validateFaultInjectionRuleApplied( cosmosDiagnostics, operationType, HttpConstants.StatusCodes.GONE, HttpConstants.SubStatusCodes.SERVER_GENERATED_410, writeRegionServerGoneRuleId, true); } else { this.validateNoFaultInjectionApplied(cosmosDiagnostics, operationType, FAULT_INJECTION_RULE_NON_APPLICABLE_ADDRESS); } writeRegionServerGoneErrorRule.disable(); CosmosFaultInjectionHelper.configureFaultInjectionRules(container, Arrays.asList(primaryReplicaServerGoneErrorRule)).block(); assertThat(primaryReplicaServerGoneErrorRule.getRegionEndpoints().size()).isEqualTo(this.writeRegionMap.size() + 1); assertThat(primaryReplicaServerGoneErrorRule.getRegionEndpoints().containsAll(this.writeRegionMap.values())).isTrue(); assertThat(primaryReplicaServerGoneErrorRule.getAddresses().size()).isEqualTo(this.writeRegionMap.size()); } finally { writeRegionServerGoneErrorRule.disable(); primaryReplicaServerGoneErrorRule.disable(); } } @Test(groups = {"multi-region"}, timeOut = TIMEOUT) 
public void faultInjectionServerErrorRuleTests_Region() throws JsonProcessingException { List<String> preferredLocations = this.readRegionMap.keySet().stream().collect(Collectors.toList()); CosmosAsyncClient clientWithPreferredRegion = null; String localRegionRuleId = "ServerErrorRule-LocalRegion-" + UUID.randomUUID(); FaultInjectionRule serverErrorRuleLocalRegion = new FaultInjectionRuleBuilder(localRegionRuleId) .condition( new FaultInjectionConditionBuilder() .region(preferredLocations.get(0)) .build() ) .result( FaultInjectionResultBuilders .getResultBuilder(FaultInjectionServerErrorType.GONE) .times(1) .build() ) .duration(Duration.ofMinutes(5)) .build(); String remoteRegionRuleId = "ServerErrorRule-RemoteRegion-" + UUID.randomUUID(); FaultInjectionRule serverErrorRuleRemoteRegion = new FaultInjectionRuleBuilder(remoteRegionRuleId) .condition( new FaultInjectionConditionBuilder() .region(preferredLocations.get(1)) .build() ) .result( FaultInjectionResultBuilders .getResultBuilder(FaultInjectionServerErrorType.GONE) .times(1) .build() ) .duration(Duration.ofMinutes(5)) .build(); try { clientWithPreferredRegion = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .consistencyLevel(BridgeInternal.getContextClient(this.client).getConsistencyLevel()) .preferredRegions(preferredLocations) .directMode() .buildAsyncClient(); CosmosAsyncContainer container = clientWithPreferredRegion .getDatabase(this.cosmosAsyncContainer.getDatabase().getId()) .getContainer(this.cosmosAsyncContainer.getId()); TestItem createdItem = TestItem.createNewItem(); container.createItem(createdItem).block(); CosmosFaultInjectionHelper.configureFaultInjectionRules( container, Arrays.asList(serverErrorRuleLocalRegion, serverErrorRuleRemoteRegion)) .block(); assertThat( serverErrorRuleLocalRegion.getRegionEndpoints().size() == 1 && 
serverErrorRuleLocalRegion.getRegionEndpoints().get(0).equals(this.readRegionMap.get(preferredLocations.get(0)))); assertThat( serverErrorRuleRemoteRegion.getRegionEndpoints().size() == 1 && serverErrorRuleRemoteRegion.getRegionEndpoints().get(0).equals(this.readRegionMap.get(preferredLocations.get(1)))); CosmosDiagnostics cosmosDiagnostics = this.performDocumentOperation(container, OperationType.Read, createdItem); this.validateHitCount(serverErrorRuleLocalRegion, 1, OperationType.Read, ResourceType.Document); this.validateHitCount(serverErrorRuleRemoteRegion, 0, OperationType.Read, ResourceType.Document); this.validateFaultInjectionRuleApplied( cosmosDiagnostics, OperationType.Read, HttpConstants.StatusCodes.GONE, HttpConstants.SubStatusCodes.SERVER_GENERATED_410, localRegionRuleId, true ); serverErrorRuleLocalRegion.disable(); cosmosDiagnostics = this.performDocumentOperation(container, OperationType.Read, createdItem); this.validateNoFaultInjectionApplied(cosmosDiagnostics, OperationType.Read, FAULT_INJECTION_RULE_NON_APPLICABLE_REGION_ENDPOINT); } finally { serverErrorRuleLocalRegion.disable(); serverErrorRuleRemoteRegion.disable(); safeClose(clientWithPreferredRegion); } } @Test(groups = {"multi-region", "simple"}, timeOut = TIMEOUT) public void faultInjectionServerErrorRuleTests_Partition() throws JsonProcessingException { TestItem createdItem = TestItem.createNewItem(); cosmosAsyncContainer.createItem(createdItem).block(); List<FeedRange> feedRanges = cosmosAsyncContainer.getFeedRanges().block(); String feedRangeRuleId = "ServerErrorRule-FeedRange-" + UUID.randomUUID(); FaultInjectionRule serverErrorRuleByFeedRange = new FaultInjectionRuleBuilder(feedRangeRuleId) .condition( new FaultInjectionConditionBuilder() .endpoints(new FaultInjectionEndpointBuilder(feedRanges.get(0)).build()) .build() ) .result( FaultInjectionResultBuilders .getResultBuilder(FaultInjectionServerErrorType.GONE) .times(1) .build() ) .duration(Duration.ofMinutes(5)) .build(); 
CosmosFaultInjectionHelper.configureFaultInjectionRules(cosmosAsyncContainer, Arrays.asList(serverErrorRuleByFeedRange)).block(); assertThat( serverErrorRuleByFeedRange.getRegionEndpoints().size() == this.readRegionMap.size() && serverErrorRuleByFeedRange.getRegionEndpoints().containsAll(this.readRegionMap.keySet())); assertThat(serverErrorRuleByFeedRange.getAddresses().size()).isBetween( this.readRegionMap.size() * 3, this.readRegionMap.size() * 5); String query = "select * from c"; CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions(); queryRequestOptions.setFeedRange(feedRanges.get(0)); CosmosDiagnostics cosmosDiagnostics = cosmosAsyncContainer.queryItems(query, queryRequestOptions, TestItem.class).byPage().blockFirst().getCosmosDiagnostics(); this.validateHitCount(serverErrorRuleByFeedRange, 1, OperationType.Query, ResourceType.Document); this.validateFaultInjectionRuleApplied( cosmosDiagnostics, OperationType.Query, HttpConstants.StatusCodes.GONE, HttpConstants.SubStatusCodes.SERVER_GENERATED_410, feedRangeRuleId, true ); queryRequestOptions.setFeedRange(feedRanges.get(1)); try { cosmosDiagnostics = cosmosAsyncContainer.queryItems(query, queryRequestOptions, TestItem.class).byPage().blockFirst().getCosmosDiagnostics(); this.validateNoFaultInjectionApplied(cosmosDiagnostics, OperationType.Query, FAULT_INJECTION_RULE_NON_APPLICABLE_ADDRESS); } finally { serverErrorRuleByFeedRange.disable(); } } @Test(groups = {"multi-region", "simple"}, timeOut = TIMEOUT) @Test(groups = {"multi-region", "simple"}, timeOut = TIMEOUT) public void faultInjectionServerErrorRuleTests_ServerConnectionDelay() throws JsonProcessingException { CosmosAsyncClient newClient = null; String ruleId = "serverErrorRule-serverConnectionDelay-" + UUID.randomUUID(); FaultInjectionRule serverConnectionDelayRule = new FaultInjectionRuleBuilder(ruleId) .condition( new FaultInjectionConditionBuilder() .operationType(FaultInjectionOperationType.CREATE_ITEM) .build() ) .result( 
FaultInjectionResultBuilders .getResultBuilder(FaultInjectionServerErrorType.CONNECTION_DELAY) .delay(Duration.ofSeconds(2)) .times(1) .build() ) .duration(Duration.ofMinutes(5)) .build(); try { DirectConnectionConfig directConnectionConfig = DirectConnectionConfig.getDefaultConfig(); directConnectionConfig.setConnectTimeout(Duration.ofSeconds(1)); newClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .consistencyLevel(BridgeInternal.getContextClient(this.client).getConsistencyLevel()) .directMode(directConnectionConfig) .buildAsyncClient(); CosmosAsyncContainer container = newClient .getDatabase(cosmosAsyncContainer.getDatabase().getId()) .getContainer(cosmosAsyncContainer.getId()); CosmosFaultInjectionHelper.configureFaultInjectionRules(container, Arrays.asList(serverConnectionDelayRule)).block(); CosmosItemResponse<TestItem> itemResponse = container.createItem(TestItem.createNewItem()).block(); assertThat(serverConnectionDelayRule.getHitCount()).isBetween(1l, 2l); this.validateFaultInjectionRuleApplied( itemResponse.getDiagnostics(), OperationType.Create, HttpConstants.StatusCodes.GONE, HttpConstants.SubStatusCodes.TRANSPORT_GENERATED_410, ruleId, true ); } finally { serverConnectionDelayRule.disable(); safeClose(newClient); } } @Test(groups = {"multi-region", "simple"}, dataProvider = "faultInjectionOperationTypeProvider", timeOut = TIMEOUT) public void faultInjectionServerErrorRuleTests_ServerConnectionDelay_warmup( FaultInjectionOperationType operationType, boolean primaryAddressesOnly) { CosmosAsyncClient newClient = null; String ruleId = "serverErrorRule-serverConnectionDelay-warmup" + UUID.randomUUID(); FaultInjectionRule serverConnectionDelayWarmupRule = new FaultInjectionRuleBuilder(ruleId) .condition( new FaultInjectionConditionBuilder() .operationType(operationType) .build() ) .result( FaultInjectionResultBuilders 
.getResultBuilder(FaultInjectionServerErrorType.CONNECTION_DELAY) .delay(Duration.ofSeconds(2)) .times(1) .build() ) .duration(Duration.ofMinutes(5)) .build(); try { DirectConnectionConfig directConnectionConfig = DirectConnectionConfig.getDefaultConfig(); directConnectionConfig.setConnectTimeout(Duration.ofSeconds(1)); newClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .consistencyLevel(BridgeInternal.getContextClient(this.client).getConsistencyLevel()) .directMode(directConnectionConfig) .buildAsyncClient(); CosmosAsyncContainer container = newClient .getDatabase(cosmosAsyncContainer.getDatabase().getId()) .getContainer(cosmosAsyncContainer.getId()); logger.info("serverConnectionDelayWarmupRule: get all the addresses"); List<FeedRange> feedRanges = container.getFeedRanges().block(); for (FeedRange feedRange : feedRanges) { String feedRangeRuleId = "serverErrorRule-test-feedRang" + feedRange.toString(); FaultInjectionRule feedRangeRule = new FaultInjectionRuleBuilder(feedRangeRuleId) .condition( new FaultInjectionConditionBuilder() .endpoints( new FaultInjectionEndpointBuilder(feedRange).build() ) .build() ) .result( FaultInjectionResultBuilders .getResultBuilder(FaultInjectionServerErrorType.TOO_MANY_REQUEST) .build() ) .duration(Duration.ofMinutes(1)) .build(); CosmosFaultInjectionHelper.configureFaultInjectionRules(container, Arrays.asList(feedRangeRule)).block(); logger.info("serverConnectionDelayWarmupRul. FeedRange {}, Addresses {}", feedRange, feedRangeRule.getAddresses()); feedRangeRule.disable(); } CosmosFaultInjectionHelper.configureFaultInjectionRules(container, Arrays.asList(serverConnectionDelayWarmupRule)).block(); int partitionSize = container.getFeedRanges().block().size(); container.openConnectionsAndInitCaches().block(); if (primaryAddressesOnly) { logger.info( "serverConnectionDelayWarmupRule. 
PartitionSize {}, hitCount{}, hitDetails {}", partitionSize, serverConnectionDelayWarmupRule.getHitCount(), serverConnectionDelayWarmupRule.getHitCountDetails()); int primaryAddressCount = partitionSize; int maxConnectionRetriesPerPrimary = primaryAddressCount * 2; assertThat(serverConnectionDelayWarmupRule.getHitCount()).isLessThanOrEqualTo(primaryAddressCount + maxConnectionRetriesPerPrimary); this.validateHitCount( serverConnectionDelayWarmupRule, serverConnectionDelayWarmupRule.getHitCount(), OperationType.Create, ResourceType.Connection); } else { long minSecondaryAddressesCount = 3L * partitionSize; long maxAddressesCount = 5L * partitionSize; long minTotalConnectionEstablishmentAttempts = minSecondaryAddressesCount + 2 * minSecondaryAddressesCount; long maxTotalConnectionEstablishmentAttempts = maxAddressesCount + 2 * maxAddressesCount; assertThat(serverConnectionDelayWarmupRule.getHitCount()).isBetween(minTotalConnectionEstablishmentAttempts, maxTotalConnectionEstablishmentAttempts); this.validateHitCount( serverConnectionDelayWarmupRule, serverConnectionDelayWarmupRule.getHitCount(), OperationType.Create, ResourceType.Connection); } } finally { serverConnectionDelayWarmupRule.disable(); safeClose(newClient); } } @Test(groups = {"multi-region", "simple"}, dataProvider = "faultInjectionServerErrorResponseProvider", timeOut = TIMEOUT) public void faultInjectionServerErrorRuleTests_ServerErrorResponse( FaultInjectionServerErrorType serverErrorType, boolean canRetry, int errorStatusCode, int errorSubStatusCode) throws JsonProcessingException { String ruleId = "serverErrorRule-" + serverErrorType + "-" + UUID.randomUUID(); FaultInjectionRule serverErrorRule = new FaultInjectionRuleBuilder(ruleId) .condition( new FaultInjectionConditionBuilder() .operationType(FaultInjectionOperationType.READ_ITEM) .build() ) .result( FaultInjectionResultBuilders .getResultBuilder(serverErrorType) .times(1) .build() ) .duration(Duration.ofMinutes(5)) .build(); try { TestItem 
createdItem = TestItem.createNewItem(); cosmosAsyncContainer.createItem(createdItem).block(); CosmosFaultInjectionHelper.configureFaultInjectionRules(cosmosAsyncContainer, Arrays.asList(serverErrorRule)).block(); CosmosDiagnostics cosmosDiagnostics = null; if (canRetry) { try { cosmosDiagnostics = cosmosAsyncContainer .readItem(createdItem.getId(), new PartitionKey(createdItem.getId()), TestItem.class) .block() .getDiagnostics(); } catch (Exception exception) { fail("Request should succeeded, but failed with " + exception); } } else { try { cosmosDiagnostics = cosmosAsyncContainer .readItem(createdItem.getId(), new PartitionKey(createdItem.getId()), TestItem.class) .block() .getDiagnostics(); fail("Request should fail, but succeeded"); } catch (Exception e) { cosmosDiagnostics = ((CosmosException)e).getDiagnostics(); } } this.validateHitCount(serverErrorRule, 1, OperationType.Read, ResourceType.Document); this.validateFaultInjectionRuleApplied( cosmosDiagnostics, OperationType.Read, errorStatusCode, errorSubStatusCode, ruleId, canRetry ); } finally { serverErrorRule.disable(); } } @Test(groups = {"multi-region", "simple"}, timeOut = TIMEOUT) public void faultInjectionServerErrorRuleTests_HitLimit() throws JsonProcessingException { TestItem createdItem = TestItem.createNewItem(); cosmosAsyncContainer.createItem(createdItem).block(); String hitLimitRuleId = "ServerErrorRule-hitLimit-" + UUID.randomUUID(); FaultInjectionRule hitLimitServerErrorRule = new FaultInjectionRuleBuilder(hitLimitRuleId) .condition( new FaultInjectionConditionBuilder() .build() ) .result( FaultInjectionResultBuilders .getResultBuilder(FaultInjectionServerErrorType.GONE) .times(1) .build() ) .hitLimit(2) .build(); try { CosmosFaultInjectionHelper.configureFaultInjectionRules(cosmosAsyncContainer, Arrays.asList(hitLimitServerErrorRule)).block(); assertThat( hitLimitServerErrorRule.getRegionEndpoints().size() == this.readRegionMap.size() && 
hitLimitServerErrorRule.getRegionEndpoints().containsAll(this.readRegionMap.keySet())); assertThat(hitLimitServerErrorRule.getAddresses().size() == 0); for (int i = 1; i <= 3; i++) { CosmosDiagnostics cosmosDiagnostics = this.performDocumentOperation(cosmosAsyncContainer, OperationType.Read, createdItem); if (i <= 2) { this.validateFaultInjectionRuleApplied( cosmosDiagnostics, OperationType.Read, HttpConstants.StatusCodes.GONE, HttpConstants.SubStatusCodes.SERVER_GENERATED_410, hitLimitRuleId, true ); } else { cosmosDiagnostics = this.performDocumentOperation(cosmosAsyncContainer, OperationType.Read, createdItem); this.validateNoFaultInjectionApplied(cosmosDiagnostics, OperationType.Read, FAULT_INJECTION_RULE_NON_APPLICABLE_HIT_LIMIT); } } this.validateHitCount(hitLimitServerErrorRule, 2, OperationType.Read, ResourceType.Document); } finally { hitLimitServerErrorRule.disable(); } } @AfterClass(groups = {"multi-region", "simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } private CosmosDiagnostics performDocumentOperation( CosmosAsyncContainer cosmosAsyncContainer, OperationType operationType, TestItem createdItem) { try { if (operationType == OperationType.Query) { CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions(); String query = String.format("SELECT * from c where c.id = '%s'", createdItem.getId()); FeedResponse<TestItem> itemFeedResponse = cosmosAsyncContainer.queryItems(query, queryRequestOptions, TestItem.class).byPage().blockFirst(); return itemFeedResponse.getCosmosDiagnostics(); } if (operationType == OperationType.Read || operationType == OperationType.Delete || operationType == OperationType.Replace || operationType == OperationType.Create || operationType == OperationType.Patch || operationType == OperationType.Upsert) { if (operationType == OperationType.Read) { return cosmosAsyncContainer.readItem( createdItem.getId(), new PartitionKey(createdItem.getId()), 
TestItem.class).block().getDiagnostics(); } if (operationType == OperationType.Replace) { return cosmosAsyncContainer.replaceItem( createdItem, createdItem.getId(), new PartitionKey(createdItem.getId())).block().getDiagnostics(); } if (operationType == OperationType.Delete) { return cosmosAsyncContainer.deleteItem(createdItem, null).block().getDiagnostics(); } if (operationType == OperationType.Create) { return cosmosAsyncContainer.createItem(TestItem.createNewItem()).block().getDiagnostics(); } if (operationType == OperationType.Upsert) { return cosmosAsyncContainer.upsertItem(TestItem.createNewItem()).block().getDiagnostics(); } if (operationType == OperationType.Patch) { CosmosPatchOperations patchOperations = CosmosPatchOperations .create() .add("newPath", "newPath"); return cosmosAsyncContainer .patchItem(createdItem.getId(), new PartitionKey(createdItem.getId()), patchOperations, TestItem.class) .block().getDiagnostics(); } } throw new IllegalArgumentException("The operation type is not supported"); } catch (CosmosException cosmosException) { return cosmosException.getDiagnostics(); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void faultInjectionServerErrorRuleTests_includePrimary() throws JsonProcessingException { TestItem createdItem = TestItem.createNewItem(); CosmosAsyncContainer singlePartitionContainer = getSharedSinglePartitionCosmosContainer(client); List<FeedRange> feedRanges = singlePartitionContainer.getFeedRanges().block(); String serverGoneIncludePrimaryRuleId = "serverErrorRule-includePrimary-" + UUID.randomUUID(); FaultInjectionRule serverGoneIncludePrimaryErrorRule = new FaultInjectionRuleBuilder(serverGoneIncludePrimaryRuleId) .condition( new FaultInjectionConditionBuilder() .endpoints( new FaultInjectionEndpointBuilder(feedRanges.get(0)) .replicaCount(1) .includePrimary(true) .build() ) .build() ) .result( FaultInjectionResultBuilders .getResultBuilder(FaultInjectionServerErrorType.GONE) .times(1) .build() ) 
.duration(Duration.ofMinutes(5)) .build(); try { CosmosFaultInjectionHelper.configureFaultInjectionRules(singlePartitionContainer, Arrays.asList(serverGoneIncludePrimaryErrorRule)).block(); CosmosDiagnostics cosmosDiagnostics = this.performDocumentOperation(singlePartitionContainer, OperationType.Create, createdItem); this.validateFaultInjectionRuleApplied( cosmosDiagnostics, OperationType.Create, HttpConstants.StatusCodes.GONE, HttpConstants.SubStatusCodes.SERVER_GENERATED_410, serverGoneIncludePrimaryRuleId, true); cosmosDiagnostics = this.performDocumentOperation(singlePartitionContainer, OperationType.Upsert, createdItem); this.validateFaultInjectionRuleApplied( cosmosDiagnostics, OperationType.Upsert, HttpConstants.StatusCodes.GONE, HttpConstants.SubStatusCodes.SERVER_GENERATED_410, serverGoneIncludePrimaryRuleId, true); } finally { serverGoneIncludePrimaryErrorRule.disable(); } } private void validateFaultInjectionRuleApplied( CosmosDiagnostics cosmosDiagnostics, OperationType operationType, int statusCode, int subStatusCode, String ruleId, boolean canRetryOnFaultInjectedError) throws JsonProcessingException { List<ObjectNode> diagnosticsNode = new ArrayList<>(); if (operationType == OperationType.Query) { int clientSideDiagnosticsIndex = cosmosDiagnostics.toString().indexOf("[{\"userAgent\""); ArrayNode arrayNode = (ArrayNode) Utils.getSimpleObjectMapper().readTree(cosmosDiagnostics.toString().substring(clientSideDiagnosticsIndex)); for (JsonNode node : arrayNode) { diagnosticsNode.add((ObjectNode) node); } } else { diagnosticsNode.add((ObjectNode) Utils.getSimpleObjectMapper().readTree(cosmosDiagnostics.toString())); } for (ObjectNode diagnosticNode : diagnosticsNode) { JsonNode responseStatisticsList = diagnosticNode.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); if (canRetryOnFaultInjectedError) { assertThat(responseStatisticsList.size()).isEqualTo(2); } else { assertThat(responseStatisticsList.size()).isOne(); } 
JsonNode storeResult = responseStatisticsList.get(0).get("storeResult"); assertThat(storeResult).isNotNull(); assertThat(storeResult.get("statusCode").asInt()).isEqualTo(statusCode); assertThat(storeResult.get("subStatusCode").asInt()).isEqualTo(subStatusCode); assertThat(storeResult.get("faultInjectionRuleId").asText()).isEqualTo(ruleId); } } private void validateNoFaultInjectionApplied( CosmosDiagnostics cosmosDiagnostics, OperationType operationType, String faultInjectionNonApplicableReason) throws JsonProcessingException { List<ObjectNode> diagnosticsNode = new ArrayList<>(); if (operationType == OperationType.Query) { int clientSideDiagnosticsIndex = cosmosDiagnostics.toString().indexOf("[{\"userAgent\""); ArrayNode arrayNode = (ArrayNode) Utils.getSimpleObjectMapper().readTree(cosmosDiagnostics.toString().substring(clientSideDiagnosticsIndex)); for (JsonNode node : arrayNode) { diagnosticsNode.add((ObjectNode) node); } } else { diagnosticsNode.add((ObjectNode) Utils.getSimpleObjectMapper().readTree(cosmosDiagnostics.toString())); } for (ObjectNode diagnosticNode : diagnosticsNode) { JsonNode responseStatisticsList = diagnosticNode.get("responseStatisticsList"); assertThat(responseStatisticsList.isArray()).isTrue(); for (int i = 0; i < responseStatisticsList.size(); i++) { JsonNode storeResult = responseStatisticsList.get(i).get("storeResult"); assertThat(storeResult.get("faultInjectionRuleId")).isNull(); assertThat(storeResult.get("faultInjectionEvaluationResults")).isNotNull(); assertThat(storeResult.get("faultInjectionEvaluationResults").toString().contains(faultInjectionNonApplicableReason)); } assertThat(responseStatisticsList.size()).isOne(); } } private Map<String, String> getRegionMap(DatabaseAccount databaseAccount, boolean writeOnly) { Iterator<DatabaseAccountLocation> locationIterator = writeOnly ? 
databaseAccount.getWritableLocations().iterator() : databaseAccount.getReadableLocations().iterator(); Map<String, String> regionMap = new ConcurrentHashMap<>(); while (locationIterator.hasNext()) { DatabaseAccountLocation accountLocation = locationIterator.next(); regionMap.put(accountLocation.getName(), accountLocation.getEndpoint()); } return regionMap; } private void validateHitCount( FaultInjectionRule rule, long totalHitCount, OperationType operationType, ResourceType resourceType) { assertThat(rule.getHitCount()).isEqualTo(totalHitCount); if (totalHitCount > 0) { assertThat(rule.getHitCountDetails().size()).isEqualTo(1); assertThat(rule.getHitCountDetails().get(operationType.toString() + "-" + resourceType.toString())).isEqualTo(totalHitCount); } } }
If the previous comment isn't addressed and the constructor continues to accept both `ReadBehavior` and `WriteBehavior` parameters, it should also throw when both are null — otherwise the channel is constructed in a state where it can neither read nor write.
public StorageSeekableByteChannel(int chunkSize, ReadBehavior readBehavior, WriteBehavior writeBehavior) { if (readBehavior != null && writeBehavior != null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "StorageSeekableByteChannel can have only one of readBehavior or writeBehavior.")); } buffer = ByteBuffer.allocate(chunkSize); this.readBehavior = readBehavior; this.writeBehavior = writeBehavior; bufferAbsolutePosition = 0; if (readBehavior != null) { buffer.limit(0); } }
if (readBehavior != null && writeBehavior != null) {
public StorageSeekableByteChannel(int chunkSize, ReadBehavior readBehavior, WriteBehavior writeBehavior) { if (readBehavior != null && writeBehavior != null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "StorageSeekableByteChannel can have only one of readBehavior or writeBehavior.")); } buffer = ByteBuffer.allocate(chunkSize); this.readBehavior = readBehavior; this.writeBehavior = writeBehavior; bufferAbsolutePosition = 0; if (readBehavior != null) { buffer.limit(0); } }
class StorageSeekableByteChannel implements SeekableByteChannel { private static final ClientLogger LOGGER = new ClientLogger(StorageSeekableByteChannel.class); /** * Interface for injectable behavior to read from a backing Storage resource. */ public interface ReadBehavior { /** * Reads n bytes from the backing resource, where {@code 0 <= n <= dst.remaining()}. * Emulates behavior of {@link java.nio.channels.ReadableByteChannel * * @param dst Destination to read the resource into. * @param sourceOffset Offset to read from the resource. * @return Number of bytes read from the resource, possibly zero, or -1 end of resource. * @see java.nio.channels.ReadableByteChannel */ int read(ByteBuffer dst, long sourceOffset); /** * Gets the length of the resource. The returned value may have been cached from previous operations on this * instance. * @return The length in bytes. */ long getResourceLength(); } /** * Interface for injectable behavior to write to a backing Storage resource. */ public interface WriteBehavior { /** * Writes to the backing resource. * @param src Bytes to write. * @param destOffset Offset of backing resource to write the bytes at. */ void write(ByteBuffer src, long destOffset); /** * Calls any necessary commit/flush calls on the backing resource. * @param totalLength Total length of the bytes being committed (necessary for some resource types). */ void commit(long totalLength); /** * Determines whether the write behavior can support a random seek to this position. May fetch information * from the service to determine if possible. * @param position Desired seek position. * @return Whether the resource supports this. */ boolean canSeek(long position); /** * Changes the size of the backing resource, if supported. * @param newSize New size of backing resource. * @throws UnsupportedOperationException If operation is not supported by the backing resource. 
*/ void resize(long newSize); } private final ReadBehavior readBehavior; private final WriteBehavior writeBehavior; private boolean isClosed; private ByteBuffer buffer; private long bufferAbsolutePosition; private long absolutePosition; /** * Constructs an instance of this class. * @param chunkSize Size of the internal channel buffer to use for data transfer, and for individual REST transfers. * @param readBehavior Behavior for reading from the backing Storage resource. * @param writeBehavior Behavior for writing to the backing Storage resource. * @throws IllegalArgumentException If both read and write behavior are given. */ /** * Gets the read-behavior used by this channel. * @return {@link ReadBehavior} of this channel. */ public ReadBehavior getReadBehavior() { return readBehavior; } /** * Gets the write-behavior used by this channel. * @return {@link WriteBehavior} of this channel. */ public WriteBehavior getWriteBehavior() { return writeBehavior; } @Override public int read(ByteBuffer dst) throws IOException { assertOpen(); assertCanRead(); if (dst.isReadOnly()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("ByteBuffer dst must support writes.")); } if (buffer.remaining() == 0) { if (refillReadBuffer(absolutePosition) == -1) { absolutePosition = readBehavior.getResourceLength(); return -1; } } if (buffer.remaining() == 0) { return 0; } int read = Math.min(buffer.remaining(), dst.remaining()); ByteBuffer temp = buffer.duplicate(); temp.limit(temp.position() + read); dst.put(temp); buffer.position(buffer.position() + read); absolutePosition += read; return read; } private int refillReadBuffer(long newBufferAbsolutePosition) { buffer.clear(); int read = readBehavior.read(buffer, newBufferAbsolutePosition); buffer.rewind(); buffer.limit(Math.max(read, 0)); bufferAbsolutePosition = Math.min(newBufferAbsolutePosition, readBehavior.getResourceLength()); return read; } @Override public int write(ByteBuffer src) throws IOException { assertOpen(); 
assertCanWrite(); int write = Math.min(src.remaining(), buffer.remaining()); if (write > 0) { ByteBuffer temp = src.duplicate(); temp.limit(temp.position() + write); buffer.put(temp); src.position(src.position() + write); } if (buffer.remaining() == 0) { try { flushWriteBuffer(); } catch (RuntimeException e) { buffer.position(buffer.position() - write); throw LOGGER.logExceptionAsError(e); } } absolutePosition += write; return write; } private void flushWriteBuffer() { if (buffer.position() == 0) { return; } int startingPosition = buffer.position(); buffer.limit(buffer.position()); buffer.rewind(); try { writeBehavior.write(buffer, bufferAbsolutePosition); } catch (RuntimeException e) { buffer.limit(buffer.capacity()); buffer.position(startingPosition); throw LOGGER.logExceptionAsError(e); } bufferAbsolutePosition += buffer.limit(); buffer.clear(); } @Override public long position() throws IOException { assertOpen(); return absolutePosition; } @Override public SeekableByteChannel position(long newPosition) throws IOException { assertOpen(); if (readBehavior != null) { readModeSeek(newPosition); } else { writeModeSeek(newPosition); } return this; } private void readModeSeek(long newPosition) { if (newPosition < bufferAbsolutePosition || newPosition > bufferAbsolutePosition + buffer.limit()) { buffer.clear(); buffer.limit(0); } else { buffer.position((int) (newPosition - bufferAbsolutePosition)); } absolutePosition = newPosition; } private void writeModeSeek(long newPosition) { if (!writeBehavior.canSeek(newPosition)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "The backing resource does not support this position change.")); } flushWriteBuffer(); absolutePosition = newPosition; bufferAbsolutePosition = newPosition; } @Override public long size() throws IOException { assertOpen(); if (readBehavior != null) { return readBehavior.getResourceLength(); } else { return absolutePosition; } } @Override public SeekableByteChannel truncate(long size) 
throws IOException { assertOpen(); writeBehavior.resize(size); return this; } @Override public boolean isOpen() { return !isClosed; } @Override public void close() throws IOException { if (writeBehavior != null) { flushWriteBuffer(); writeBehavior.commit(absolutePosition); } isClosed = true; buffer = null; } private void assertCanRead() { if (readBehavior == null) { throw LOGGER.logExceptionAsError(new NonReadableChannelException()); } } private void assertCanWrite() { if (writeBehavior == null) { throw LOGGER.logExceptionAsError(new NonWritableChannelException()); } } private void assertOpen() throws ClosedChannelException { if (isClosed) { throw LOGGER.logThrowableAsError(new ClosedChannelException()); } } }
class StorageSeekableByteChannel implements SeekableByteChannel { private static final ClientLogger LOGGER = new ClientLogger(StorageSeekableByteChannel.class); /** * Interface for injectable behavior to read from a backing Storage resource. */ public interface ReadBehavior { /** * Reads n bytes from the backing resource, where {@code 0 <= n <= dst.remaining()}. * Emulates behavior of {@link java.nio.channels.ReadableByteChannel * * @param dst Destination to read the resource into. * @param sourceOffset Offset to read from the resource. * @return Number of bytes read from the resource, possibly zero, or -1 end of resource. * @see java.nio.channels.ReadableByteChannel */ int read(ByteBuffer dst, long sourceOffset); /** * Gets the length of the resource. The returned value may have been cached from previous operations on this * instance. * @return The length in bytes. */ long getResourceLength(); } /** * Interface for injectable behavior to write to a backing Storage resource. */ public interface WriteBehavior { /** * Writes to the backing resource. * @param src Bytes to write. * @param destOffset Offset of backing resource to write the bytes at. */ void write(ByteBuffer src, long destOffset); /** * Calls any necessary commit/flush calls on the backing resource. * @param totalLength Total length of the bytes being committed (necessary for some resource types). */ void commit(long totalLength); /** * Determines whether the write behavior can support a random seek to this position. May fetch information * from the service to determine if possible. * @param position Desired seek position. * @return Whether the resource supports this. */ boolean canSeek(long position); /** * Changes the size of the backing resource, if supported. * @param newSize New size of backing resource. * @throws UnsupportedOperationException If operation is not supported by the backing resource. 
*/ void resize(long newSize); } private final ReadBehavior readBehavior; private final WriteBehavior writeBehavior; private boolean isClosed; private ByteBuffer buffer; private long bufferAbsolutePosition; private long absolutePosition; /** * Constructs an instance of this class. * @param chunkSize Size of the internal channel buffer to use for data transfer, and for individual REST transfers. * @param readBehavior Behavior for reading from the backing Storage resource. * @param writeBehavior Behavior for writing to the backing Storage resource. * @throws IllegalArgumentException If both read and write behavior are given. */ /** * Gets the read-behavior used by this channel. * @return {@link ReadBehavior} of this channel. */ public ReadBehavior getReadBehavior() { return readBehavior; } /** * Gets the write-behavior used by this channel. * @return {@link WriteBehavior} of this channel. */ public WriteBehavior getWriteBehavior() { return writeBehavior; } @Override public int read(ByteBuffer dst) throws IOException { assertOpen(); assertCanRead(); if (dst.isReadOnly()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("ByteBuffer dst must support writes.")); } if (buffer.remaining() == 0) { if (refillReadBuffer(absolutePosition) == -1) { absolutePosition = readBehavior.getResourceLength(); return -1; } } if (buffer.remaining() == 0) { return 0; } int read = Math.min(buffer.remaining(), dst.remaining()); ByteBuffer temp = buffer.duplicate(); temp.limit(temp.position() + read); dst.put(temp); buffer.position(buffer.position() + read); absolutePosition += read; return read; } private int refillReadBuffer(long newBufferAbsolutePosition) { buffer.clear(); int read = readBehavior.read(buffer, newBufferAbsolutePosition); buffer.rewind(); buffer.limit(Math.max(read, 0)); bufferAbsolutePosition = Math.min(newBufferAbsolutePosition, readBehavior.getResourceLength()); return read; } @Override public int write(ByteBuffer src) throws IOException { assertOpen(); 
assertCanWrite(); int write = Math.min(src.remaining(), buffer.remaining()); if (write > 0) { ByteBuffer temp = src.duplicate(); temp.limit(temp.position() + write); buffer.put(temp); src.position(src.position() + write); } if (buffer.remaining() == 0) { try { flushWriteBuffer(); } catch (RuntimeException e) { buffer.position(buffer.position() - write); throw LOGGER.logExceptionAsError(e); } } absolutePosition += write; return write; } private void flushWriteBuffer() { if (buffer.position() == 0) { return; } int startingPosition = buffer.position(); buffer.limit(buffer.position()); buffer.rewind(); try { writeBehavior.write(buffer, bufferAbsolutePosition); } catch (RuntimeException e) { buffer.limit(buffer.capacity()); buffer.position(startingPosition); throw LOGGER.logExceptionAsError(e); } bufferAbsolutePosition += buffer.limit(); buffer.clear(); } @Override public long position() throws IOException { assertOpen(); return absolutePosition; } @Override public SeekableByteChannel position(long newPosition) throws IOException { assertOpen(); if (readBehavior != null) { readModeSeek(newPosition); } else { writeModeSeek(newPosition); } return this; } private void readModeSeek(long newPosition) { if (newPosition < bufferAbsolutePosition || newPosition > bufferAbsolutePosition + buffer.limit()) { buffer.clear(); buffer.limit(0); } else { buffer.position((int) (newPosition - bufferAbsolutePosition)); } absolutePosition = newPosition; } private void writeModeSeek(long newPosition) { if (!writeBehavior.canSeek(newPosition)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "The backing resource does not support this position change.")); } flushWriteBuffer(); absolutePosition = newPosition; bufferAbsolutePosition = newPosition; } @Override public long size() throws IOException { assertOpen(); if (readBehavior != null) { return readBehavior.getResourceLength(); } else { return absolutePosition; } } @Override public SeekableByteChannel truncate(long size) 
throws IOException { assertOpen(); writeBehavior.resize(size); return this; } @Override public boolean isOpen() { return !isClosed; } @Override public void close() throws IOException { if (writeBehavior != null) { flushWriteBuffer(); writeBehavior.commit(absolutePosition); } isClosed = true; buffer = null; } private void assertCanRead() { if (readBehavior == null) { throw LOGGER.logExceptionAsError(new NonReadableChannelException()); } } private void assertCanWrite() { if (writeBehavior == null) { throw LOGGER.logExceptionAsError(new NonWritableChannelException()); } } private void assertOpen() throws ClosedChannelException { if (isClosed) { throw LOGGER.logThrowableAsError(new ClosedChannelException()); } } }
Should throw if `chunkSize` is less than 1. `ByteBuffer.allocate` likely does this already, but handling it ourselves lets the error message indicate more clearly what is wrong.
public StorageSeekableByteChannel(int chunkSize, ReadBehavior readBehavior, WriteBehavior writeBehavior) { if (readBehavior != null && writeBehavior != null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "StorageSeekableByteChannel can have only one of readBehavior or writeBehavior.")); } buffer = ByteBuffer.allocate(chunkSize); this.readBehavior = readBehavior; this.writeBehavior = writeBehavior; bufferAbsolutePosition = 0; if (readBehavior != null) { buffer.limit(0); } }
buffer = ByteBuffer.allocate(chunkSize);
public StorageSeekableByteChannel(int chunkSize, ReadBehavior readBehavior, WriteBehavior writeBehavior) { if (readBehavior != null && writeBehavior != null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "StorageSeekableByteChannel can have only one of readBehavior or writeBehavior.")); } buffer = ByteBuffer.allocate(chunkSize); this.readBehavior = readBehavior; this.writeBehavior = writeBehavior; bufferAbsolutePosition = 0; if (readBehavior != null) { buffer.limit(0); } }
class StorageSeekableByteChannel implements SeekableByteChannel { private static final ClientLogger LOGGER = new ClientLogger(StorageSeekableByteChannel.class); /** * Interface for injectable behavior to read from a backing Storage resource. */ public interface ReadBehavior { /** * Reads n bytes from the backing resource, where {@code 0 <= n <= dst.remaining()}. * Emulates behavior of {@link java.nio.channels.ReadableByteChannel * * @param dst Destination to read the resource into. * @param sourceOffset Offset to read from the resource. * @return Number of bytes read from the resource, possibly zero, or -1 end of resource. * @see java.nio.channels.ReadableByteChannel */ int read(ByteBuffer dst, long sourceOffset); /** * Gets the length of the resource. The returned value may have been cached from previous operations on this * instance. * @return The length in bytes. */ long getResourceLength(); } /** * Interface for injectable behavior to write to a backing Storage resource. */ public interface WriteBehavior { /** * Writes to the backing resource. * @param src Bytes to write. * @param destOffset Offset of backing resource to write the bytes at. */ void write(ByteBuffer src, long destOffset); /** * Calls any necessary commit/flush calls on the backing resource. * @param totalLength Total length of the bytes being committed (necessary for some resource types). */ void commit(long totalLength); /** * Determines whether the write behavior can support a random seek to this position. May fetch information * from the service to determine if possible. * @param position Desired seek position. * @return Whether the resource supports this. */ boolean canSeek(long position); /** * Changes the size of the backing resource, if supported. * @param newSize New size of backing resource. * @throws UnsupportedOperationException If operation is not supported by the backing resource. 
*/ void resize(long newSize); } private final ReadBehavior readBehavior; private final WriteBehavior writeBehavior; private boolean isClosed; private ByteBuffer buffer; private long bufferAbsolutePosition; private long absolutePosition; /** * Constructs an instance of this class. * @param chunkSize Size of the internal channel buffer to use for data transfer, and for individual REST transfers. * @param readBehavior Behavior for reading from the backing Storage resource. * @param writeBehavior Behavior for writing to the backing Storage resource. * @throws IllegalArgumentException If both read and write behavior are given. */ /** * Gets the read-behavior used by this channel. * @return {@link ReadBehavior} of this channel. */ public ReadBehavior getReadBehavior() { return readBehavior; } /** * Gets the write-behavior used by this channel. * @return {@link WriteBehavior} of this channel. */ public WriteBehavior getWriteBehavior() { return writeBehavior; } @Override public int read(ByteBuffer dst) throws IOException { assertOpen(); assertCanRead(); if (dst.isReadOnly()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("ByteBuffer dst must support writes.")); } if (buffer.remaining() == 0) { if (refillReadBuffer(absolutePosition) == -1) { absolutePosition = readBehavior.getResourceLength(); return -1; } } if (buffer.remaining() == 0) { return 0; } int read = Math.min(buffer.remaining(), dst.remaining()); ByteBuffer temp = buffer.duplicate(); temp.limit(temp.position() + read); dst.put(temp); buffer.position(buffer.position() + read); absolutePosition += read; return read; } private int refillReadBuffer(long newBufferAbsolutePosition) { buffer.clear(); int read = readBehavior.read(buffer, newBufferAbsolutePosition); buffer.rewind(); buffer.limit(Math.max(read, 0)); bufferAbsolutePosition = Math.min(newBufferAbsolutePosition, readBehavior.getResourceLength()); return read; } @Override public int write(ByteBuffer src) throws IOException { assertOpen(); 
assertCanWrite(); int write = Math.min(src.remaining(), buffer.remaining()); if (write > 0) { ByteBuffer temp = src.duplicate(); temp.limit(temp.position() + write); buffer.put(temp); src.position(src.position() + write); } if (buffer.remaining() == 0) { try { flushWriteBuffer(); } catch (RuntimeException e) { buffer.position(buffer.position() - write); throw LOGGER.logExceptionAsError(e); } } absolutePosition += write; return write; } private void flushWriteBuffer() { if (buffer.position() == 0) { return; } int startingPosition = buffer.position(); buffer.limit(buffer.position()); buffer.rewind(); try { writeBehavior.write(buffer, bufferAbsolutePosition); } catch (RuntimeException e) { buffer.limit(buffer.capacity()); buffer.position(startingPosition); throw LOGGER.logExceptionAsError(e); } bufferAbsolutePosition += buffer.limit(); buffer.clear(); } @Override public long position() throws IOException { assertOpen(); return absolutePosition; } @Override public SeekableByteChannel position(long newPosition) throws IOException { assertOpen(); if (readBehavior != null) { readModeSeek(newPosition); } else { writeModeSeek(newPosition); } return this; } private void readModeSeek(long newPosition) { if (newPosition < bufferAbsolutePosition || newPosition > bufferAbsolutePosition + buffer.limit()) { buffer.clear(); buffer.limit(0); } else { buffer.position((int) (newPosition - bufferAbsolutePosition)); } absolutePosition = newPosition; } private void writeModeSeek(long newPosition) { if (!writeBehavior.canSeek(newPosition)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "The backing resource does not support this position change.")); } flushWriteBuffer(); absolutePosition = newPosition; bufferAbsolutePosition = newPosition; } @Override public long size() throws IOException { assertOpen(); if (readBehavior != null) { return readBehavior.getResourceLength(); } else { return absolutePosition; } } @Override public SeekableByteChannel truncate(long size) 
throws IOException { assertOpen(); writeBehavior.resize(size); return this; } @Override public boolean isOpen() { return !isClosed; } @Override public void close() throws IOException { if (writeBehavior != null) { flushWriteBuffer(); writeBehavior.commit(absolutePosition); } isClosed = true; buffer = null; } private void assertCanRead() { if (readBehavior == null) { throw LOGGER.logExceptionAsError(new NonReadableChannelException()); } } private void assertCanWrite() { if (writeBehavior == null) { throw LOGGER.logExceptionAsError(new NonWritableChannelException()); } } private void assertOpen() throws ClosedChannelException { if (isClosed) { throw LOGGER.logThrowableAsError(new ClosedChannelException()); } } }
class StorageSeekableByteChannel implements SeekableByteChannel { private static final ClientLogger LOGGER = new ClientLogger(StorageSeekableByteChannel.class); /** * Interface for injectable behavior to read from a backing Storage resource. */ public interface ReadBehavior { /** * Reads n bytes from the backing resource, where {@code 0 <= n <= dst.remaining()}. * Emulates behavior of {@link java.nio.channels.ReadableByteChannel * * @param dst Destination to read the resource into. * @param sourceOffset Offset to read from the resource. * @return Number of bytes read from the resource, possibly zero, or -1 end of resource. * @see java.nio.channels.ReadableByteChannel */ int read(ByteBuffer dst, long sourceOffset); /** * Gets the length of the resource. The returned value may have been cached from previous operations on this * instance. * @return The length in bytes. */ long getResourceLength(); } /** * Interface for injectable behavior to write to a backing Storage resource. */ public interface WriteBehavior { /** * Writes to the backing resource. * @param src Bytes to write. * @param destOffset Offset of backing resource to write the bytes at. */ void write(ByteBuffer src, long destOffset); /** * Calls any necessary commit/flush calls on the backing resource. * @param totalLength Total length of the bytes being committed (necessary for some resource types). */ void commit(long totalLength); /** * Determines whether the write behavior can support a random seek to this position. May fetch information * from the service to determine if possible. * @param position Desired seek position. * @return Whether the resource supports this. */ boolean canSeek(long position); /** * Changes the size of the backing resource, if supported. * @param newSize New size of backing resource. * @throws UnsupportedOperationException If operation is not supported by the backing resource. 
*/ void resize(long newSize); } private final ReadBehavior readBehavior; private final WriteBehavior writeBehavior; private boolean isClosed; private ByteBuffer buffer; private long bufferAbsolutePosition; private long absolutePosition; /** * Constructs an instance of this class. * @param chunkSize Size of the internal channel buffer to use for data transfer, and for individual REST transfers. * @param readBehavior Behavior for reading from the backing Storage resource. * @param writeBehavior Behavior for writing to the backing Storage resource. * @throws IllegalArgumentException If both read and write behavior are given. */ /** * Gets the read-behavior used by this channel. * @return {@link ReadBehavior} of this channel. */ public ReadBehavior getReadBehavior() { return readBehavior; } /** * Gets the write-behavior used by this channel. * @return {@link WriteBehavior} of this channel. */ public WriteBehavior getWriteBehavior() { return writeBehavior; } @Override public int read(ByteBuffer dst) throws IOException { assertOpen(); assertCanRead(); if (dst.isReadOnly()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("ByteBuffer dst must support writes.")); } if (buffer.remaining() == 0) { if (refillReadBuffer(absolutePosition) == -1) { absolutePosition = readBehavior.getResourceLength(); return -1; } } if (buffer.remaining() == 0) { return 0; } int read = Math.min(buffer.remaining(), dst.remaining()); ByteBuffer temp = buffer.duplicate(); temp.limit(temp.position() + read); dst.put(temp); buffer.position(buffer.position() + read); absolutePosition += read; return read; } private int refillReadBuffer(long newBufferAbsolutePosition) { buffer.clear(); int read = readBehavior.read(buffer, newBufferAbsolutePosition); buffer.rewind(); buffer.limit(Math.max(read, 0)); bufferAbsolutePosition = Math.min(newBufferAbsolutePosition, readBehavior.getResourceLength()); return read; } @Override public int write(ByteBuffer src) throws IOException { assertOpen(); 
assertCanWrite(); int write = Math.min(src.remaining(), buffer.remaining()); if (write > 0) { ByteBuffer temp = src.duplicate(); temp.limit(temp.position() + write); buffer.put(temp); src.position(src.position() + write); } if (buffer.remaining() == 0) { try { flushWriteBuffer(); } catch (RuntimeException e) { buffer.position(buffer.position() - write); throw LOGGER.logExceptionAsError(e); } } absolutePosition += write; return write; } private void flushWriteBuffer() { if (buffer.position() == 0) { return; } int startingPosition = buffer.position(); buffer.limit(buffer.position()); buffer.rewind(); try { writeBehavior.write(buffer, bufferAbsolutePosition); } catch (RuntimeException e) { buffer.limit(buffer.capacity()); buffer.position(startingPosition); throw LOGGER.logExceptionAsError(e); } bufferAbsolutePosition += buffer.limit(); buffer.clear(); } @Override public long position() throws IOException { assertOpen(); return absolutePosition; } @Override public SeekableByteChannel position(long newPosition) throws IOException { assertOpen(); if (readBehavior != null) { readModeSeek(newPosition); } else { writeModeSeek(newPosition); } return this; } private void readModeSeek(long newPosition) { if (newPosition < bufferAbsolutePosition || newPosition > bufferAbsolutePosition + buffer.limit()) { buffer.clear(); buffer.limit(0); } else { buffer.position((int) (newPosition - bufferAbsolutePosition)); } absolutePosition = newPosition; } private void writeModeSeek(long newPosition) { if (!writeBehavior.canSeek(newPosition)) { throw LOGGER.logExceptionAsError(new IllegalArgumentException( "The backing resource does not support this position change.")); } flushWriteBuffer(); absolutePosition = newPosition; bufferAbsolutePosition = newPosition; } @Override public long size() throws IOException { assertOpen(); if (readBehavior != null) { return readBehavior.getResourceLength(); } else { return absolutePosition; } } @Override public SeekableByteChannel truncate(long size) 
throws IOException { assertOpen(); writeBehavior.resize(size); return this; } @Override public boolean isOpen() { return !isClosed; } @Override public void close() throws IOException { if (writeBehavior != null) { flushWriteBuffer(); writeBehavior.commit(absolutePosition); } isClosed = true; buffer = null; } private void assertCanRead() { if (readBehavior == null) { throw LOGGER.logExceptionAsError(new NonReadableChannelException()); } } private void assertCanWrite() { if (writeBehavior == null) { throw LOGGER.logExceptionAsError(new NonWritableChannelException()); } } private void assertOpen() throws ClosedChannelException { if (isClosed) { throw LOGGER.logThrowableAsError(new ClosedChannelException()); } } }